Snap for 6356162 from 33215d7567f42dc64bcaca4420ca6aa3f32b0c0e to r-keystone-qcom-release

Change-Id: I7579a5f22c22da780de4a3aa8fd1d73b2aa0483d
diff --git a/darwin-x86/bin/cxx_extractor b/darwin-x86/bin/cxx_extractor
index 36b0b1e..33961e3 100755
--- a/darwin-x86/bin/cxx_extractor
+++ b/darwin-x86/bin/cxx_extractor
Binary files differ
diff --git a/darwin-x86/bin/header-abi-diff b/darwin-x86/bin/header-abi-diff
index 65698c8..fd41f02 100755
--- a/darwin-x86/bin/header-abi-diff
+++ b/darwin-x86/bin/header-abi-diff
Binary files differ
diff --git a/darwin-x86/bin/header-abi-dumper b/darwin-x86/bin/header-abi-dumper
index 2fea1d1..7911a68 100755
--- a/darwin-x86/bin/header-abi-dumper
+++ b/darwin-x86/bin/header-abi-dumper
Binary files differ
diff --git a/darwin-x86/bin/header-abi-linker b/darwin-x86/bin/header-abi-linker
index 187aee3..54c2ec4 100755
--- a/darwin-x86/bin/header-abi-linker
+++ b/darwin-x86/bin/header-abi-linker
Binary files differ
diff --git a/darwin-x86/bin/merge-abi-diff b/darwin-x86/bin/merge-abi-diff
index 429357f..e5ba411 100755
--- a/darwin-x86/bin/merge-abi-diff
+++ b/darwin-x86/bin/merge-abi-diff
Binary files differ
diff --git a/darwin-x86/bin/proto_metadata_plugin b/darwin-x86/bin/proto_metadata_plugin
index 053795c..dfdd9ff 100755
--- a/darwin-x86/bin/proto_metadata_plugin
+++ b/darwin-x86/bin/proto_metadata_plugin
Binary files differ
diff --git a/darwin-x86/bin/protoc_extractor b/darwin-x86/bin/protoc_extractor
index e4e0af3..a7c34e6 100755
--- a/darwin-x86/bin/protoc_extractor
+++ b/darwin-x86/bin/protoc_extractor
Binary files differ
diff --git a/darwin-x86/bin/versioner b/darwin-x86/bin/versioner
index a79ce10..559b3d1 100755
--- a/darwin-x86/bin/versioner
+++ b/darwin-x86/bin/versioner
Binary files differ
diff --git a/darwin-x86/clang-headers b/darwin-x86/clang-headers
index 4009f74..36c43f3 120000
--- a/darwin-x86/clang-headers
+++ b/darwin-x86/clang-headers
@@ -1 +1 @@
-lib64/clang/10.0.1/include
\ No newline at end of file
+lib64/clang/10.0.5/include
\ No newline at end of file
diff --git a/darwin-x86/lib64/clang/10.0.1/include/__clang_cuda_builtin_vars.h b/darwin-x86/lib64/clang/10.0.5/include/__clang_cuda_builtin_vars.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/__clang_cuda_builtin_vars.h
rename to darwin-x86/lib64/clang/10.0.5/include/__clang_cuda_builtin_vars.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/__clang_cuda_cmath.h b/darwin-x86/lib64/clang/10.0.5/include/__clang_cuda_cmath.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/__clang_cuda_cmath.h
rename to darwin-x86/lib64/clang/10.0.5/include/__clang_cuda_cmath.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/__clang_cuda_complex_builtins.h b/darwin-x86/lib64/clang/10.0.5/include/__clang_cuda_complex_builtins.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/__clang_cuda_complex_builtins.h
rename to darwin-x86/lib64/clang/10.0.5/include/__clang_cuda_complex_builtins.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/__clang_cuda_device_functions.h b/darwin-x86/lib64/clang/10.0.5/include/__clang_cuda_device_functions.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/__clang_cuda_device_functions.h
rename to darwin-x86/lib64/clang/10.0.5/include/__clang_cuda_device_functions.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/__clang_cuda_intrinsics.h b/darwin-x86/lib64/clang/10.0.5/include/__clang_cuda_intrinsics.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/__clang_cuda_intrinsics.h
rename to darwin-x86/lib64/clang/10.0.5/include/__clang_cuda_intrinsics.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/__clang_cuda_libdevice_declares.h b/darwin-x86/lib64/clang/10.0.5/include/__clang_cuda_libdevice_declares.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/__clang_cuda_libdevice_declares.h
rename to darwin-x86/lib64/clang/10.0.5/include/__clang_cuda_libdevice_declares.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/__clang_cuda_math_forward_declares.h b/darwin-x86/lib64/clang/10.0.5/include/__clang_cuda_math_forward_declares.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/__clang_cuda_math_forward_declares.h
rename to darwin-x86/lib64/clang/10.0.5/include/__clang_cuda_math_forward_declares.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/__clang_cuda_runtime_wrapper.h b/darwin-x86/lib64/clang/10.0.5/include/__clang_cuda_runtime_wrapper.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/__clang_cuda_runtime_wrapper.h
rename to darwin-x86/lib64/clang/10.0.5/include/__clang_cuda_runtime_wrapper.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/__stddef_max_align_t.h b/darwin-x86/lib64/clang/10.0.5/include/__stddef_max_align_t.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/__stddef_max_align_t.h
rename to darwin-x86/lib64/clang/10.0.5/include/__stddef_max_align_t.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/__wmmintrin_aes.h b/darwin-x86/lib64/clang/10.0.5/include/__wmmintrin_aes.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/__wmmintrin_aes.h
rename to darwin-x86/lib64/clang/10.0.5/include/__wmmintrin_aes.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/__wmmintrin_pclmul.h b/darwin-x86/lib64/clang/10.0.5/include/__wmmintrin_pclmul.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/__wmmintrin_pclmul.h
rename to darwin-x86/lib64/clang/10.0.5/include/__wmmintrin_pclmul.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/adxintrin.h b/darwin-x86/lib64/clang/10.0.5/include/adxintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/adxintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/adxintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/altivec.h b/darwin-x86/lib64/clang/10.0.5/include/altivec.h
similarity index 98%
rename from darwin-x86/lib64/clang/10.0.1/include/altivec.h
rename to darwin-x86/lib64/clang/10.0.5/include/altivec.h
index 4008440..7e231a2 100644
--- a/darwin-x86/lib64/clang/10.0.1/include/altivec.h
+++ b/darwin-x86/lib64/clang/10.0.5/include/altivec.h
@@ -2761,8 +2761,8 @@
   return (vector double)__builtin_vsx_lxvl(__a, (__b << 56));
 }
 
-static __inline__ vector double __ATTRS_o_ai vec_xl_len_r(unsigned char *__a,
-                                                          size_t __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_xl_len_r(unsigned char *__a, size_t __b) {
   vector unsigned char __res =
       (vector unsigned char)__builtin_vsx_lxvll(__a, (__b << 56));
 #ifdef __LITTLE_ENDIAN__
@@ -2876,9 +2876,10 @@
 #ifdef __VSX__
 #define vec_ctf(__a, __b)                                                      \
   _Generic((__a), vector int                                                   \
-           : (vector float)__builtin_altivec_vcfsx((__a), (__b)),              \
+           : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)),  \
              vector unsigned int                                               \
-           : (vector float)__builtin_altivec_vcfux((vector int)(__a), (__b)),  \
+           : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \
+                                                   (__b)),                     \
              vector unsigned long long                                         \
            : (__builtin_convertvector((vector unsigned long long)(__a),        \
                                       vector double) *                         \
@@ -2892,9 +2893,10 @@
 #else
 #define vec_ctf(__a, __b)                                                      \
   _Generic((__a), vector int                                                   \
-           : (vector float)__builtin_altivec_vcfsx((__a), (__b)),              \
+           : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)),  \
              vector unsigned int                                               \
-           : (vector float)__builtin_altivec_vcfux((vector int)(__a), (__b)))
+           : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \
+                                                   (__b)))
 #endif
 
 /* vec_vcfsx */
@@ -2910,10 +2912,11 @@
 #ifdef __VSX__
 #define vec_cts(__a, __b)                                                      \
   _Generic((__a), vector float                                                 \
-           : __builtin_altivec_vctsxs((__a), (__b)), vector double             \
+           : __builtin_altivec_vctsxs((vector float)(__a), (__b)),             \
+             vector double                                                     \
            : __extension__({                                                   \
              vector double __ret =                                             \
-                 (__a) *                                                       \
+                 (vector double)(__a) *                                        \
                  (vector double)(vector unsigned long long)((0x3ffULL + (__b)) \
                                                             << 52);            \
              __builtin_convertvector(__ret, vector signed long long);          \
@@ -2931,10 +2934,11 @@
 #ifdef __VSX__
 #define vec_ctu(__a, __b)                                                      \
   _Generic((__a), vector float                                                 \
-           : __builtin_altivec_vctuxs((__a), (__b)), vector double             \
+           : __builtin_altivec_vctuxs((vector float)(__a), (__b)),             \
+             vector double                                                     \
            : __extension__({                                                   \
              vector double __ret =                                             \
-                 (__a) *                                                       \
+                 (vector double)(__a) *                                        \
                  (vector double)(vector unsigned long long)((0x3ffULL + __b)   \
                                                             << 52);            \
              __builtin_convertvector(__ret, vector unsigned long long);        \
@@ -3286,9 +3290,7 @@
 
 /* vec_dss */
 
-static __inline__ void __attribute__((__always_inline__)) vec_dss(int __a) {
-  __builtin_altivec_dss(__a);
-}
+#define vec_dss __builtin_altivec_dss
 
 /* vec_dssall */
 
@@ -6301,19 +6303,20 @@
 #ifdef __VSX__
 static __inline__ vector double __ATTRS_o_ai vec_or(vector bool long long __a,
                                                     vector double __b) {
-  return (vector unsigned long long)__a | (vector unsigned long long)__b;
+  return (vector double)((vector unsigned long long)__a |
+                         (vector unsigned long long)__b);
 }
 
 static __inline__ vector double __ATTRS_o_ai vec_or(vector double __a,
                                                     vector bool long long __b) {
-  return (vector unsigned long long)__a | (vector unsigned long long)__b;
+  return (vector double)((vector unsigned long long)__a |
+                         (vector unsigned long long)__b);
 }
 
 static __inline__ vector double __ATTRS_o_ai vec_or(vector double __a,
                                                     vector double __b) {
-  vector unsigned long long __res =
-      (vector unsigned long long)__a | (vector unsigned long long)__b;
-  return (vector double)__res;
+  return (vector double)((vector unsigned long long)__a |
+                         (vector unsigned long long)__b);
 }
 
 static __inline__ vector signed long long __ATTRS_o_ai
@@ -14781,7 +14784,7 @@
 static __inline__ int __ATTRS_o_ai vec_all_ne(vector float __a,
                                               vector float __b) {
 #ifdef __VSX__
-  return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ, __a, __b);
+  return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ, __a, __b);
 #else
   return __builtin_altivec_vcmpeqfp_p(__CR6_EQ, __a, __b);
 #endif
@@ -16361,27 +16364,32 @@
 
 static inline __ATTRS_o_ai vector signed short vec_xl(signed long long __offset,
                                                       signed short *__ptr) {
-  return *(unaligned_vec_sshort *)(__ptr + __offset);
+  signed char *__addr = (signed char *)__ptr + __offset;
+  return *(unaligned_vec_sshort *)__addr;
 }
 
 static inline __ATTRS_o_ai vector unsigned short
 vec_xl(signed long long __offset, unsigned short *__ptr) {
-  return *(unaligned_vec_ushort *)(__ptr + __offset);
+  signed char *__addr = (signed char *)__ptr + __offset;
+  return *(unaligned_vec_ushort *)__addr;
 }
 
 static inline __ATTRS_o_ai vector signed int vec_xl(signed long long __offset,
                                                     signed int *__ptr) {
-  return *(unaligned_vec_sint *)(__ptr + __offset);
+  signed char *__addr = (signed char *)__ptr + __offset;
+  return *(unaligned_vec_sint *)__addr;
 }
 
 static inline __ATTRS_o_ai vector unsigned int vec_xl(signed long long __offset,
                                                       unsigned int *__ptr) {
-  return *(unaligned_vec_uint *)(__ptr + __offset);
+  signed char *__addr = (signed char *)__ptr + __offset;
+  return *(unaligned_vec_uint *)__addr;
 }
 
 static inline __ATTRS_o_ai vector float vec_xl(signed long long __offset,
                                                float *__ptr) {
-  return *(unaligned_vec_float *)(__ptr + __offset);
+  signed char *__addr = (signed char *)__ptr + __offset;
+  return *(unaligned_vec_float *)__addr;
 }
 
 #ifdef __VSX__
@@ -16391,17 +16399,20 @@
 
 static inline __ATTRS_o_ai vector signed long long
 vec_xl(signed long long __offset, signed long long *__ptr) {
-  return *(unaligned_vec_sll *)(__ptr + __offset);
+  signed char *__addr = (signed char *)__ptr + __offset;
+  return *(unaligned_vec_sll *)__addr;
 }
 
 static inline __ATTRS_o_ai vector unsigned long long
 vec_xl(signed long long __offset, unsigned long long *__ptr) {
-  return *(unaligned_vec_ull *)(__ptr + __offset);
+  signed char *__addr = (signed char *)__ptr + __offset;
+  return *(unaligned_vec_ull *)__addr;
 }
 
 static inline __ATTRS_o_ai vector double vec_xl(signed long long __offset,
                                                 double *__ptr) {
-  return *(unaligned_vec_double *)(__ptr + __offset);
+  signed char *__addr = (signed char *)__ptr + __offset;
+  return *(unaligned_vec_double *)__addr;
 }
 #endif
 
@@ -16411,12 +16422,14 @@
     __attribute__((aligned(1)));
 static inline __ATTRS_o_ai vector signed __int128
 vec_xl(signed long long __offset, signed __int128 *__ptr) {
-  return *(unaligned_vec_si128 *)(__ptr + __offset);
+  signed char *__addr = (signed char *)__ptr + __offset;
+  return *(unaligned_vec_si128 *)__addr;
 }
 
 static inline __ATTRS_o_ai vector unsigned __int128
 vec_xl(signed long long __offset, unsigned __int128 *__ptr) {
-  return *(unaligned_vec_ui128 *)(__ptr + __offset);
+  signed char *__addr = (signed char *)__ptr + __offset;
+  return *(unaligned_vec_ui128 *)__addr;
 }
 #endif
 
@@ -16425,27 +16438,27 @@
 #ifdef __LITTLE_ENDIAN__
 static __inline__ vector signed char __ATTRS_o_ai
 vec_xl_be(signed long long __offset, signed char *__ptr) {
-  vector signed char __vec = __builtin_vsx_lxvd2x_be(__offset, __ptr);
+  vector signed char __vec = (vector signed char)__builtin_vsx_lxvd2x_be(__offset, __ptr);
   return __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
                                  13, 12, 11, 10, 9, 8);
 }
 
 static __inline__ vector unsigned char __ATTRS_o_ai
 vec_xl_be(signed long long __offset, unsigned char *__ptr) {
-  vector unsigned char __vec = __builtin_vsx_lxvd2x_be(__offset, __ptr);
+  vector unsigned char __vec = (vector unsigned char)__builtin_vsx_lxvd2x_be(__offset, __ptr);
   return __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
                                  13, 12, 11, 10, 9, 8);
 }
 
 static __inline__ vector signed short  __ATTRS_o_ai
 vec_xl_be(signed long long __offset, signed short *__ptr) {
-  vector signed short __vec = __builtin_vsx_lxvd2x_be(__offset, __ptr);
+  vector signed short __vec = (vector signed short)__builtin_vsx_lxvd2x_be(__offset, __ptr);
   return __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
 }
 
 static __inline__ vector unsigned short __ATTRS_o_ai
 vec_xl_be(signed long long __offset, unsigned short *__ptr) {
-  vector unsigned short __vec = __builtin_vsx_lxvd2x_be(__offset, __ptr);
+  vector unsigned short __vec = (vector unsigned short)__builtin_vsx_lxvd2x_be(__offset, __ptr);
   return __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
 }
 
@@ -16513,50 +16526,58 @@
 static inline __ATTRS_o_ai void vec_xst(vector signed short __vec,
                                         signed long long __offset,
                                         signed short *__ptr) {
-  *(unaligned_vec_sshort *)(__ptr + __offset) = __vec;
+  signed char *__addr = (signed char *)__ptr + __offset;
+  *(unaligned_vec_sshort *)__addr = __vec;
 }
 
 static inline __ATTRS_o_ai void vec_xst(vector unsigned short __vec,
                                         signed long long __offset,
                                         unsigned short *__ptr) {
-  *(unaligned_vec_ushort *)(__ptr + __offset) = __vec;
+  signed char *__addr = (signed char *)__ptr + __offset;
+  *(unaligned_vec_ushort *)__addr = __vec;
 }
 
 static inline __ATTRS_o_ai void vec_xst(vector signed int __vec,
                                         signed long long __offset,
                                         signed int *__ptr) {
-  *(unaligned_vec_sint *)(__ptr + __offset) = __vec;
+  signed char *__addr = (signed char *)__ptr + __offset;
+  *(unaligned_vec_sint *)__addr = __vec;
 }
 
 static inline __ATTRS_o_ai void vec_xst(vector unsigned int __vec,
                                         signed long long __offset,
                                         unsigned int *__ptr) {
-  *(unaligned_vec_uint *)(__ptr + __offset) = __vec;
+  signed char *__addr = (signed char *)__ptr + __offset;
+  *(unaligned_vec_uint *)__addr = __vec;
 }
 
 static inline __ATTRS_o_ai void vec_xst(vector float __vec,
                                         signed long long __offset,
                                         float *__ptr) {
-  *(unaligned_vec_float *)(__ptr + __offset) = __vec;
+  signed char *__addr = (signed char *)__ptr + __offset;
+  *(unaligned_vec_float *)__addr = __vec;
 }
 
 #ifdef __VSX__
 static inline __ATTRS_o_ai void vec_xst(vector signed long long __vec,
                                         signed long long __offset,
                                         signed long long *__ptr) {
-  *(unaligned_vec_sll *)(__ptr + __offset) = __vec;
+  signed char *__addr = (signed char *)__ptr + __offset;
+  *(unaligned_vec_sll *)__addr = __vec;
 }
 
 static inline __ATTRS_o_ai void vec_xst(vector unsigned long long __vec,
                                         signed long long __offset,
                                         unsigned long long *__ptr) {
-  *(unaligned_vec_ull *)(__ptr + __offset) = __vec;
+  signed char *__addr = (signed char *)__ptr + __offset;
+  *(unaligned_vec_ull *)__addr = __vec;
 }
 
 static inline __ATTRS_o_ai void vec_xst(vector double __vec,
                                         signed long long __offset,
                                         double *__ptr) {
-  *(unaligned_vec_double *)(__ptr + __offset) = __vec;
+  signed char *__addr = (signed char *)__ptr + __offset;
+  *(unaligned_vec_double *)__addr = __vec;
 }
 #endif
 
@@ -16564,13 +16585,15 @@
 static inline __ATTRS_o_ai void vec_xst(vector signed __int128 __vec,
                                         signed long long __offset,
                                         signed __int128 *__ptr) {
-  *(unaligned_vec_si128 *)(__ptr + __offset) = __vec;
+  signed char *__addr = (signed char *)__ptr + __offset;
+  *(unaligned_vec_si128 *)__addr = __vec;
 }
 
 static inline __ATTRS_o_ai void vec_xst(vector unsigned __int128 __vec,
                                         signed long long __offset,
                                         unsigned __int128 *__ptr) {
-  *(unaligned_vec_ui128 *)(__ptr + __offset) = __vec;
+  signed char *__addr = (signed char *)__ptr + __offset;
+  *(unaligned_vec_ui128 *)__addr = __vec;
 }
 #endif
 
@@ -16583,7 +16606,8 @@
   vector signed char __tmp =
      __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
                              13, 12, 11, 10, 9, 8);
-  __builtin_vsx_stxvd2x_be(__tmp, __offset, __ptr);
+  typedef __attribute__((vector_size(sizeof(__tmp)))) double __vector_double;
+  __builtin_vsx_stxvd2x_be((__vector_double)__tmp, __offset, __ptr);
 }
 
 static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned char __vec,
@@ -16592,7 +16616,8 @@
   vector unsigned char __tmp =
      __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
                              13, 12, 11, 10, 9, 8);
-  __builtin_vsx_stxvd2x_be(__tmp, __offset, __ptr);
+  typedef __attribute__((vector_size(sizeof(__tmp)))) double __vector_double;
+  __builtin_vsx_stxvd2x_be((__vector_double)__tmp, __offset, __ptr);
 }
 
 static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed short __vec,
@@ -16600,7 +16625,8 @@
                                                signed short *__ptr) {
   vector signed short __tmp =
      __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
-  __builtin_vsx_stxvd2x_be(__tmp, __offset, __ptr);
+  typedef __attribute__((vector_size(sizeof(__tmp)))) double __vector_double;
+  __builtin_vsx_stxvd2x_be((__vector_double)__tmp, __offset, __ptr);
 }
 
 static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned short __vec,
@@ -16608,7 +16634,8 @@
                                                unsigned short *__ptr) {
   vector unsigned short __tmp =
      __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
-  __builtin_vsx_stxvd2x_be(__tmp, __offset, __ptr);
+  typedef __attribute__((vector_size(sizeof(__tmp)))) double __vector_double;
+  __builtin_vsx_stxvd2x_be((__vector_double)__tmp, __offset, __ptr);
 }
 
 static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed int __vec,
@@ -16620,32 +16647,32 @@
 static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned int __vec,
                                                signed long long  __offset,
                                                unsigned int *__ptr) {
-  __builtin_vsx_stxvw4x_be(__vec, __offset, __ptr);
+  __builtin_vsx_stxvw4x_be((vector int)__vec, __offset, __ptr);
 }
 
 static __inline__ void __ATTRS_o_ai vec_xst_be(vector float __vec,
                                                signed long long  __offset,
                                                float *__ptr) {
-  __builtin_vsx_stxvw4x_be(__vec, __offset, __ptr);
+  __builtin_vsx_stxvw4x_be((vector int)__vec, __offset, __ptr);
 }
 
 #ifdef __VSX__
 static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed long long __vec,
                                                signed long long  __offset,
                                                signed long long *__ptr) {
-  __builtin_vsx_stxvd2x_be(__vec, __offset, __ptr);
+  __builtin_vsx_stxvd2x_be((vector double)__vec, __offset, __ptr);
 }
 
 static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned long long __vec,
                                                signed long long  __offset,
                                                unsigned long long *__ptr) {
-  __builtin_vsx_stxvd2x_be(__vec, __offset, __ptr);
+  __builtin_vsx_stxvd2x_be((vector double)__vec, __offset, __ptr);
 }
 
 static __inline__ void __ATTRS_o_ai vec_xst_be(vector double __vec,
                                                signed long long  __offset,
                                                double *__ptr) {
-  __builtin_vsx_stxvd2x_be(__vec, __offset, __ptr);
+  __builtin_vsx_stxvd2x_be((vector double)__vec, __offset, __ptr);
 }
 #endif
 
@@ -16667,13 +16694,13 @@
 #endif
 
 #ifdef __POWER9_VECTOR__
-#define vec_test_data_class(__a, __b)                                      \
-        _Generic((__a),                                                    \
-           vector float:                                                   \
-             (vector bool int)__builtin_vsx_xvtstdcsp((__a), (__b)),       \
-           vector double:                                                  \
-             (vector bool long long)__builtin_vsx_xvtstdcdp((__a), (__b))  \
-        )
+#define vec_test_data_class(__a, __b)                                          \
+  _Generic(                                                                    \
+      (__a), vector float                                                      \
+      : (vector bool int)__builtin_vsx_xvtstdcsp((vector float)(__a), (__b)),  \
+        vector double                                                          \
+      : (vector bool long long)__builtin_vsx_xvtstdcdp((vector double)(__a),   \
+                                                       (__b)))
 
 #endif /* #ifdef __POWER9_VECTOR__ */
 
diff --git a/darwin-x86/lib64/clang/10.0.1/include/ammintrin.h b/darwin-x86/lib64/clang/10.0.5/include/ammintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/ammintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/ammintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/arm64intr.h b/darwin-x86/lib64/clang/10.0.5/include/arm64intr.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/arm64intr.h
rename to darwin-x86/lib64/clang/10.0.5/include/arm64intr.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/arm_acle.h b/darwin-x86/lib64/clang/10.0.5/include/arm_acle.h
similarity index 95%
rename from darwin-x86/lib64/clang/10.0.1/include/arm_acle.h
rename to darwin-x86/lib64/clang/10.0.5/include/arm_acle.h
index 0510e6f..596ea03 100644
--- a/darwin-x86/lib64/clang/10.0.1/include/arm_acle.h
+++ b/darwin-x86/lib64/clang/10.0.5/include/arm_acle.h
@@ -90,9 +90,11 @@
 #endif
 
 /* 8.7 NOP */
+#if !defined(_MSC_VER) || !defined(__aarch64__)
 static __inline__ void __attribute__((__always_inline__, __nodebug__)) __nop(void) {
   __builtin_arm_nop();
 }
+#endif
 
 /* 9 DATA-PROCESSING INTRINSICS */
 /* 9.2 Miscellaneous data-processing intrinsics */
@@ -139,6 +141,26 @@
   return __builtin_clzll(__t);
 }
 
+/* CLS */
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__cls(uint32_t __t) {
+  return __builtin_arm_cls(__t);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__clsl(unsigned long __t) {
+#if __SIZEOF_LONG__ == 4
+  return __builtin_arm_cls(__t);
+#else
+  return __builtin_arm_cls64(__t);
+#endif
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__clsll(uint64_t __t) {
+  return __builtin_arm_cls64(__t);
+}
+
 /* REV */
 static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
 __rev(uint32_t __t) {
@@ -609,9 +631,13 @@
 #define __arm_rsr(sysreg) __builtin_arm_rsr(sysreg)
 #define __arm_rsr64(sysreg) __builtin_arm_rsr64(sysreg)
 #define __arm_rsrp(sysreg) __builtin_arm_rsrp(sysreg)
+#define __arm_rsrf(sysreg) __builtin_bit_cast(float, __arm_rsr(sysreg))
+#define __arm_rsrf64(sysreg) __builtin_bit_cast(double, __arm_rsr64(sysreg))
 #define __arm_wsr(sysreg, v) __builtin_arm_wsr(sysreg, v)
 #define __arm_wsr64(sysreg, v) __builtin_arm_wsr64(sysreg, v)
 #define __arm_wsrp(sysreg, v) __builtin_arm_wsrp(sysreg, v)
+#define __arm_wsrf(sysreg, v) __arm_wsr(sysreg, __builtin_bit_cast(uint32_t, v))
+#define __arm_wsrf64(sysreg, v) __arm_wsr64(sysreg, __builtin_bit_cast(uint64_t, v))
 
 /* Memory Tagging Extensions (MTE) Intrinsics */
 #if __ARM_FEATURE_MEMORY_TAGGING
diff --git a/darwin-x86/lib64/clang/10.0.1/include/arm_fp16.h b/darwin-x86/lib64/clang/10.0.5/include/arm_fp16.h
similarity index 99%
rename from darwin-x86/lib64/clang/10.0.1/include/arm_fp16.h
rename to darwin-x86/lib64/clang/10.0.5/include/arm_fp16.h
index 1c04f2a..71b18c1 100644
--- a/darwin-x86/lib64/clang/10.0.1/include/arm_fp16.h
+++ b/darwin-x86/lib64/clang/10.0.5/include/arm_fp16.h
@@ -251,66 +251,66 @@
   __ret = (uint64_t) __builtin_neon_vcvtah_u64_f16(__s0); \
   __ret; \
 })
-__ai float16_t vcvth_f16_u32(uint32_t __p0) {
-  float16_t __ret;
-  __ret = (float16_t) __builtin_neon_vcvth_f16_u32(__p0);
-  return __ret;
-}
-__ai float16_t vcvth_f16_u64(uint64_t __p0) {
-  float16_t __ret;
-  __ret = (float16_t) __builtin_neon_vcvth_f16_u64(__p0);
-  return __ret;
-}
 __ai float16_t vcvth_f16_u16(uint16_t __p0) {
   float16_t __ret;
   __ret = (float16_t) __builtin_neon_vcvth_f16_u16(__p0);
   return __ret;
 }
+__ai float16_t vcvth_f16_s16(int16_t __p0) {
+  float16_t __ret;
+  __ret = (float16_t) __builtin_neon_vcvth_f16_s16(__p0);
+  return __ret;
+}
+__ai float16_t vcvth_f16_u32(uint32_t __p0) {
+  float16_t __ret;
+  __ret = (float16_t) __builtin_neon_vcvth_f16_u32(__p0);
+  return __ret;
+}
 __ai float16_t vcvth_f16_s32(int32_t __p0) {
   float16_t __ret;
   __ret = (float16_t) __builtin_neon_vcvth_f16_s32(__p0);
   return __ret;
 }
+__ai float16_t vcvth_f16_u64(uint64_t __p0) {
+  float16_t __ret;
+  __ret = (float16_t) __builtin_neon_vcvth_f16_u64(__p0);
+  return __ret;
+}
 __ai float16_t vcvth_f16_s64(int64_t __p0) {
   float16_t __ret;
   __ret = (float16_t) __builtin_neon_vcvth_f16_s64(__p0);
   return __ret;
 }
-__ai float16_t vcvth_f16_s16(int16_t __p0) {
-  float16_t __ret;
-  __ret = (float16_t) __builtin_neon_vcvth_f16_s16(__p0);
-  return __ret;
-}
 #define vcvth_n_f16_u32(__p0, __p1) __extension__ ({ \
   uint32_t __s0 = __p0; \
   float16_t __ret; \
   __ret = (float16_t) __builtin_neon_vcvth_n_f16_u32(__s0, __p1); \
   __ret; \
 })
-#define vcvth_n_f16_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vcvth_n_f16_u64(__s0, __p1); \
-  __ret; \
-})
-#define vcvth_n_f16_u16(__p0, __p1) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vcvth_n_f16_u16(__s0, __p1); \
-  __ret; \
-})
 #define vcvth_n_f16_s32(__p0, __p1) __extension__ ({ \
   int32_t __s0 = __p0; \
   float16_t __ret; \
   __ret = (float16_t) __builtin_neon_vcvth_n_f16_s32(__s0, __p1); \
   __ret; \
 })
+#define vcvth_n_f16_u64(__p0, __p1) __extension__ ({ \
+  uint64_t __s0 = __p0; \
+  float16_t __ret; \
+  __ret = (float16_t) __builtin_neon_vcvth_n_f16_u64(__s0, __p1); \
+  __ret; \
+})
 #define vcvth_n_f16_s64(__p0, __p1) __extension__ ({ \
   int64_t __s0 = __p0; \
   float16_t __ret; \
   __ret = (float16_t) __builtin_neon_vcvth_n_f16_s64(__s0, __p1); \
   __ret; \
 })
+#define vcvth_n_f16_u16(__p0, __p1) __extension__ ({ \
+  uint16_t __s0 = __p0; \
+  float16_t __ret; \
+  __ret = (float16_t) __builtin_neon_vcvth_n_f16_u16(__s0, __p1); \
+  __ret; \
+})
 #define vcvth_n_f16_s16(__p0, __p1) __extension__ ({ \
   int16_t __s0 = __p0; \
   float16_t __ret; \
diff --git a/darwin-x86/lib64/clang/10.0.5/include/arm_mve.h b/darwin-x86/lib64/clang/10.0.5/include/arm_mve.h
new file mode 100644
index 0000000..f7f4c4a
--- /dev/null
+++ b/darwin-x86/lib64/clang/10.0.5/include/arm_mve.h
@@ -0,0 +1,4831 @@
+/*===---- arm_mve.h - ARM MVE intrinsics -----------------------------------===
+ *
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __ARM_MVE_H
+#define __ARM_MVE_H
+
+#if !__ARM_FEATURE_MVE
+#error "MVE support not enabled"
+#endif
+
+#include <stdint.h>
+
+typedef uint16_t mve_pred16_t;
+typedef __attribute__((neon_vector_type(8))) int16_t int16x8_t;
+typedef struct { int16x8_t val[2]; } int16x8x2_t;
+typedef struct { int16x8_t val[4]; } int16x8x4_t;
+typedef __attribute__((neon_vector_type(4))) int32_t int32x4_t;
+typedef struct { int32x4_t val[2]; } int32x4x2_t;
+typedef struct { int32x4_t val[4]; } int32x4x4_t;
+typedef __attribute__((neon_vector_type(2))) int64_t int64x2_t;
+typedef struct { int64x2_t val[2]; } int64x2x2_t;
+typedef struct { int64x2_t val[4]; } int64x2x4_t;
+typedef __attribute__((neon_vector_type(16))) int8_t int8x16_t;
+typedef struct { int8x16_t val[2]; } int8x16x2_t;
+typedef struct { int8x16_t val[4]; } int8x16x4_t;
+typedef __attribute__((neon_vector_type(8))) uint16_t uint16x8_t;
+typedef struct { uint16x8_t val[2]; } uint16x8x2_t;
+typedef struct { uint16x8_t val[4]; } uint16x8x4_t;
+typedef __attribute__((neon_vector_type(4))) uint32_t uint32x4_t;
+typedef struct { uint32x4_t val[2]; } uint32x4x2_t;
+typedef struct { uint32x4_t val[4]; } uint32x4x4_t;
+typedef __attribute__((neon_vector_type(2))) uint64_t uint64x2_t;
+typedef struct { uint64x2_t val[2]; } uint64x2x2_t;
+typedef struct { uint64x2_t val[4]; } uint64x2x4_t;
+typedef __attribute__((neon_vector_type(16))) uint8_t uint8x16_t;
+typedef struct { uint8x16_t val[2]; } uint8x16x2_t;
+typedef struct { uint8x16_t val[4]; } uint8x16x4_t;
+
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_asrl)))
+int64_t __arm_asrl(int64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_lsll)))
+uint64_t __arm_lsll(uint64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqrshr)))
+int32_t __arm_sqrshr(int32_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqrshrl)))
+int64_t __arm_sqrshrl(int64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqrshrl_sat48)))
+int64_t __arm_sqrshrl_sat48(int64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqshl)))
+int32_t __arm_sqshl(int32_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqshll)))
+int64_t __arm_sqshll(int64_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_srshr)))
+int32_t __arm_srshr(int32_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_srshrl)))
+int64_t __arm_srshrl(int64_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqrshl)))
+uint32_t __arm_uqrshl(uint32_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqrshll)))
+uint64_t __arm_uqrshll(uint64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqrshll_sat48)))
+uint64_t __arm_uqrshll_sat48(uint64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqshl)))
+uint32_t __arm_uqshl(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqshll)))
+uint64_t __arm_uqshll(uint64_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_urshr)))
+uint32_t __arm_urshr(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_urshrl)))
+uint64_t __arm_urshrl(uint64_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadciq_m_s32)))
+int32x4_t __arm_vadciq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadciq_m_s32)))
+int32x4_t __arm_vadciq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadciq_m_u32)))
+uint32x4_t __arm_vadciq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadciq_m_u32)))
+uint32x4_t __arm_vadciq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadciq_s32)))
+int32x4_t __arm_vadciq_s32(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadciq_s32)))
+int32x4_t __arm_vadciq(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadciq_u32)))
+uint32x4_t __arm_vadciq_u32(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadciq_u32)))
+uint32x4_t __arm_vadciq(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadcq_m_s32)))
+int32x4_t __arm_vadcq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadcq_m_s32)))
+int32x4_t __arm_vadcq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadcq_m_u32)))
+uint32x4_t __arm_vadcq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadcq_m_u32)))
+uint32x4_t __arm_vadcq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadcq_s32)))
+int32x4_t __arm_vadcq_s32(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadcq_s32)))
+int32x4_t __arm_vadcq(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadcq_u32)))
+uint32x4_t __arm_vadcq_u32(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadcq_u32)))
+uint32x4_t __arm_vadcq(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s16)))
+int16x8_t __arm_vaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s16)))
+int16x8_t __arm_vaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s32)))
+int32x4_t __arm_vaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s32)))
+int32x4_t __arm_vaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s8)))
+int8x16_t __arm_vaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s8)))
+int8x16_t __arm_vaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u16)))
+uint16x8_t __arm_vaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u16)))
+uint16x8_t __arm_vaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u32)))
+uint32x4_t __arm_vaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u32)))
+uint32x4_t __arm_vaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u8)))
+uint8x16_t __arm_vaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u8)))
+uint8x16_t __arm_vaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_s16)))
+int16x8_t __arm_vaddq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_s16)))
+int16x8_t __arm_vaddq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_s32)))
+int32x4_t __arm_vaddq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_s32)))
+int32x4_t __arm_vaddq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_s8)))
+int8x16_t __arm_vaddq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_s8)))
+int8x16_t __arm_vaddq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_u16)))
+uint16x8_t __arm_vaddq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_u16)))
+uint16x8_t __arm_vaddq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_u32)))
+uint32x4_t __arm_vaddq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_u32)))
+uint32x4_t __arm_vaddq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_u8)))
+uint8x16_t __arm_vaddq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_u8)))
+uint8x16_t __arm_vaddq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u16)))
+mve_pred16_t __arm_vcmpcsq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u16)))
+mve_pred16_t __arm_vcmpcsq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u32)))
+mve_pred16_t __arm_vcmpcsq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u32)))
+mve_pred16_t __arm_vcmpcsq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u8)))
+mve_pred16_t __arm_vcmpcsq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u8)))
+mve_pred16_t __arm_vcmpcsq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u16)))
+mve_pred16_t __arm_vcmpcsq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u16)))
+mve_pred16_t __arm_vcmpcsq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u32)))
+mve_pred16_t __arm_vcmpcsq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u32)))
+mve_pred16_t __arm_vcmpcsq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u8)))
+mve_pred16_t __arm_vcmpcsq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u8)))
+mve_pred16_t __arm_vcmpcsq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u16)))
+mve_pred16_t __arm_vcmpcsq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u16)))
+mve_pred16_t __arm_vcmpcsq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u32)))
+mve_pred16_t __arm_vcmpcsq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u32)))
+mve_pred16_t __arm_vcmpcsq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u8)))
+mve_pred16_t __arm_vcmpcsq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u8)))
+mve_pred16_t __arm_vcmpcsq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u16)))
+mve_pred16_t __arm_vcmpcsq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u16)))
+mve_pred16_t __arm_vcmpcsq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u32)))
+mve_pred16_t __arm_vcmpcsq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u32)))
+mve_pred16_t __arm_vcmpcsq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u8)))
+mve_pred16_t __arm_vcmpcsq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u8)))
+mve_pred16_t __arm_vcmpcsq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s16)))
+mve_pred16_t __arm_vcmpeqq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s16)))
+mve_pred16_t __arm_vcmpeqq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s32)))
+mve_pred16_t __arm_vcmpeqq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s32)))
+mve_pred16_t __arm_vcmpeqq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s8)))
+mve_pred16_t __arm_vcmpeqq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s8)))
+mve_pred16_t __arm_vcmpeqq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u16)))
+mve_pred16_t __arm_vcmpeqq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u16)))
+mve_pred16_t __arm_vcmpeqq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u32)))
+mve_pred16_t __arm_vcmpeqq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u32)))
+mve_pred16_t __arm_vcmpeqq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u8)))
+mve_pred16_t __arm_vcmpeqq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u8)))
+mve_pred16_t __arm_vcmpeqq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s16)))
+mve_pred16_t __arm_vcmpeqq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s16)))
+mve_pred16_t __arm_vcmpeqq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s32)))
+mve_pred16_t __arm_vcmpeqq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s32)))
+mve_pred16_t __arm_vcmpeqq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s8)))
+mve_pred16_t __arm_vcmpeqq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s8)))
+mve_pred16_t __arm_vcmpeqq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u16)))
+mve_pred16_t __arm_vcmpeqq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u16)))
+mve_pred16_t __arm_vcmpeqq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u32)))
+mve_pred16_t __arm_vcmpeqq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u32)))
+mve_pred16_t __arm_vcmpeqq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u8)))
+mve_pred16_t __arm_vcmpeqq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u8)))
+mve_pred16_t __arm_vcmpeqq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s16)))
+mve_pred16_t __arm_vcmpeqq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s16)))
+mve_pred16_t __arm_vcmpeqq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s32)))
+mve_pred16_t __arm_vcmpeqq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s32)))
+mve_pred16_t __arm_vcmpeqq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s8)))
+mve_pred16_t __arm_vcmpeqq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s8)))
+mve_pred16_t __arm_vcmpeqq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u16)))
+mve_pred16_t __arm_vcmpeqq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u16)))
+mve_pred16_t __arm_vcmpeqq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u32)))
+mve_pred16_t __arm_vcmpeqq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u32)))
+mve_pred16_t __arm_vcmpeqq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u8)))
+mve_pred16_t __arm_vcmpeqq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u8)))
+mve_pred16_t __arm_vcmpeqq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s16)))
+mve_pred16_t __arm_vcmpeqq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s16)))
+mve_pred16_t __arm_vcmpeqq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s32)))
+mve_pred16_t __arm_vcmpeqq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s32)))
+mve_pred16_t __arm_vcmpeqq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s8)))
+mve_pred16_t __arm_vcmpeqq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s8)))
+mve_pred16_t __arm_vcmpeqq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u16)))
+mve_pred16_t __arm_vcmpeqq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u16)))
+mve_pred16_t __arm_vcmpeqq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u32)))
+mve_pred16_t __arm_vcmpeqq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u32)))
+mve_pred16_t __arm_vcmpeqq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u8)))
+mve_pred16_t __arm_vcmpeqq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u8)))
+mve_pred16_t __arm_vcmpeqq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s16)))
+mve_pred16_t __arm_vcmpgeq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s16)))
+mve_pred16_t __arm_vcmpgeq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s32)))
+mve_pred16_t __arm_vcmpgeq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s32)))
+mve_pred16_t __arm_vcmpgeq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s8)))
+mve_pred16_t __arm_vcmpgeq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s8)))
+mve_pred16_t __arm_vcmpgeq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s16)))
+mve_pred16_t __arm_vcmpgeq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s16)))
+mve_pred16_t __arm_vcmpgeq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s32)))
+mve_pred16_t __arm_vcmpgeq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s32)))
+mve_pred16_t __arm_vcmpgeq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s8)))
+mve_pred16_t __arm_vcmpgeq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s8)))
+mve_pred16_t __arm_vcmpgeq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s16)))
+mve_pred16_t __arm_vcmpgeq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s16)))
+mve_pred16_t __arm_vcmpgeq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s32)))
+mve_pred16_t __arm_vcmpgeq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s32)))
+mve_pred16_t __arm_vcmpgeq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s8)))
+mve_pred16_t __arm_vcmpgeq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s8)))
+mve_pred16_t __arm_vcmpgeq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s16)))
+mve_pred16_t __arm_vcmpgeq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s16)))
+mve_pred16_t __arm_vcmpgeq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s32)))
+mve_pred16_t __arm_vcmpgeq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s32)))
+mve_pred16_t __arm_vcmpgeq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s8)))
+mve_pred16_t __arm_vcmpgeq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s8)))
+mve_pred16_t __arm_vcmpgeq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s16)))
+mve_pred16_t __arm_vcmpgtq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s16)))
+mve_pred16_t __arm_vcmpgtq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s32)))
+mve_pred16_t __arm_vcmpgtq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s32)))
+mve_pred16_t __arm_vcmpgtq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s8)))
+mve_pred16_t __arm_vcmpgtq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s8)))
+mve_pred16_t __arm_vcmpgtq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s16)))
+mve_pred16_t __arm_vcmpgtq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s16)))
+mve_pred16_t __arm_vcmpgtq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s32)))
+mve_pred16_t __arm_vcmpgtq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s32)))
+mve_pred16_t __arm_vcmpgtq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s8)))
+mve_pred16_t __arm_vcmpgtq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s8)))
+mve_pred16_t __arm_vcmpgtq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s16)))
+mve_pred16_t __arm_vcmpgtq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s16)))
+mve_pred16_t __arm_vcmpgtq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s32)))
+mve_pred16_t __arm_vcmpgtq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s32)))
+mve_pred16_t __arm_vcmpgtq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s8)))
+mve_pred16_t __arm_vcmpgtq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s8)))
+mve_pred16_t __arm_vcmpgtq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s16)))
+mve_pred16_t __arm_vcmpgtq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s16)))
+mve_pred16_t __arm_vcmpgtq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s32)))
+mve_pred16_t __arm_vcmpgtq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s32)))
+mve_pred16_t __arm_vcmpgtq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s8)))
+mve_pred16_t __arm_vcmpgtq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s8)))
+mve_pred16_t __arm_vcmpgtq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u16)))
+mve_pred16_t __arm_vcmphiq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u16)))
+mve_pred16_t __arm_vcmphiq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u32)))
+mve_pred16_t __arm_vcmphiq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u32)))
+mve_pred16_t __arm_vcmphiq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u8)))
+mve_pred16_t __arm_vcmphiq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u8)))
+mve_pred16_t __arm_vcmphiq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u16)))
+mve_pred16_t __arm_vcmphiq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u16)))
+mve_pred16_t __arm_vcmphiq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u32)))
+mve_pred16_t __arm_vcmphiq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u32)))
+mve_pred16_t __arm_vcmphiq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u8)))
+mve_pred16_t __arm_vcmphiq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u8)))
+mve_pred16_t __arm_vcmphiq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u16)))
+mve_pred16_t __arm_vcmphiq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u16)))
+mve_pred16_t __arm_vcmphiq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u32)))
+mve_pred16_t __arm_vcmphiq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u32)))
+mve_pred16_t __arm_vcmphiq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u8)))
+mve_pred16_t __arm_vcmphiq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u8)))
+mve_pred16_t __arm_vcmphiq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u16)))
+mve_pred16_t __arm_vcmphiq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u16)))
+mve_pred16_t __arm_vcmphiq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u32)))
+mve_pred16_t __arm_vcmphiq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u32)))
+mve_pred16_t __arm_vcmphiq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u8)))
+mve_pred16_t __arm_vcmphiq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u8)))
+mve_pred16_t __arm_vcmphiq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s16)))
+mve_pred16_t __arm_vcmpleq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s16)))
+mve_pred16_t __arm_vcmpleq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s32)))
+mve_pred16_t __arm_vcmpleq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s32)))
+mve_pred16_t __arm_vcmpleq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s8)))
+mve_pred16_t __arm_vcmpleq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s8)))
+mve_pred16_t __arm_vcmpleq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s16)))
+mve_pred16_t __arm_vcmpleq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s16)))
+mve_pred16_t __arm_vcmpleq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s32)))
+mve_pred16_t __arm_vcmpleq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s32)))
+mve_pred16_t __arm_vcmpleq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s8)))
+mve_pred16_t __arm_vcmpleq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s8)))
+mve_pred16_t __arm_vcmpleq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s16)))
+mve_pred16_t __arm_vcmpleq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s16)))
+mve_pred16_t __arm_vcmpleq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s32)))
+mve_pred16_t __arm_vcmpleq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s32)))
+mve_pred16_t __arm_vcmpleq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s8)))
+mve_pred16_t __arm_vcmpleq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s8)))
+mve_pred16_t __arm_vcmpleq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s16)))
+mve_pred16_t __arm_vcmpleq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s16)))
+mve_pred16_t __arm_vcmpleq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s32)))
+mve_pred16_t __arm_vcmpleq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s32)))
+mve_pred16_t __arm_vcmpleq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s8)))
+mve_pred16_t __arm_vcmpleq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s8)))
+mve_pred16_t __arm_vcmpleq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s16)))
+mve_pred16_t __arm_vcmpltq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s16)))
+mve_pred16_t __arm_vcmpltq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s32)))
+mve_pred16_t __arm_vcmpltq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s32)))
+mve_pred16_t __arm_vcmpltq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s8)))
+mve_pred16_t __arm_vcmpltq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s8)))
+mve_pred16_t __arm_vcmpltq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s16)))
+mve_pred16_t __arm_vcmpltq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s16)))
+mve_pred16_t __arm_vcmpltq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s32)))
+mve_pred16_t __arm_vcmpltq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s32)))
+mve_pred16_t __arm_vcmpltq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s8)))
+mve_pred16_t __arm_vcmpltq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s8)))
+mve_pred16_t __arm_vcmpltq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s16)))
+mve_pred16_t __arm_vcmpltq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s16)))
+mve_pred16_t __arm_vcmpltq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s32)))
+mve_pred16_t __arm_vcmpltq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s32)))
+mve_pred16_t __arm_vcmpltq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s8)))
+mve_pred16_t __arm_vcmpltq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s8)))
+mve_pred16_t __arm_vcmpltq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s16)))
+mve_pred16_t __arm_vcmpltq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s16)))
+mve_pred16_t __arm_vcmpltq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s32)))
+mve_pred16_t __arm_vcmpltq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s32)))
+mve_pred16_t __arm_vcmpltq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s8)))
+mve_pred16_t __arm_vcmpltq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s8)))
+mve_pred16_t __arm_vcmpltq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s16)))
+mve_pred16_t __arm_vcmpneq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s16)))
+mve_pred16_t __arm_vcmpneq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s32)))
+mve_pred16_t __arm_vcmpneq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s32)))
+mve_pred16_t __arm_vcmpneq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s8)))
+mve_pred16_t __arm_vcmpneq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s8)))
+mve_pred16_t __arm_vcmpneq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u16)))
+mve_pred16_t __arm_vcmpneq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u16)))
+mve_pred16_t __arm_vcmpneq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u32)))
+mve_pred16_t __arm_vcmpneq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u32)))
+mve_pred16_t __arm_vcmpneq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u8)))
+mve_pred16_t __arm_vcmpneq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u8)))
+mve_pred16_t __arm_vcmpneq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s16)))
+mve_pred16_t __arm_vcmpneq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s16)))
+mve_pred16_t __arm_vcmpneq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s32)))
+mve_pred16_t __arm_vcmpneq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s32)))
+mve_pred16_t __arm_vcmpneq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s8)))
+mve_pred16_t __arm_vcmpneq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s8)))
+mve_pred16_t __arm_vcmpneq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u16)))
+mve_pred16_t __arm_vcmpneq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u16)))
+mve_pred16_t __arm_vcmpneq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u32)))
+mve_pred16_t __arm_vcmpneq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u32)))
+mve_pred16_t __arm_vcmpneq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u8)))
+mve_pred16_t __arm_vcmpneq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u8)))
+mve_pred16_t __arm_vcmpneq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s16)))
+mve_pred16_t __arm_vcmpneq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s16)))
+mve_pred16_t __arm_vcmpneq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s32)))
+mve_pred16_t __arm_vcmpneq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s32)))
+mve_pred16_t __arm_vcmpneq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s8)))
+mve_pred16_t __arm_vcmpneq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s8)))
+mve_pred16_t __arm_vcmpneq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u16)))
+mve_pred16_t __arm_vcmpneq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u16)))
+mve_pred16_t __arm_vcmpneq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u32)))
+mve_pred16_t __arm_vcmpneq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u32)))
+mve_pred16_t __arm_vcmpneq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u8)))
+mve_pred16_t __arm_vcmpneq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u8)))
+mve_pred16_t __arm_vcmpneq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s16)))
+mve_pred16_t __arm_vcmpneq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s16)))
+mve_pred16_t __arm_vcmpneq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s32)))
+mve_pred16_t __arm_vcmpneq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s32)))
+mve_pred16_t __arm_vcmpneq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s8)))
+mve_pred16_t __arm_vcmpneq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s8)))
+mve_pred16_t __arm_vcmpneq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u16)))
+mve_pred16_t __arm_vcmpneq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u16)))
+mve_pred16_t __arm_vcmpneq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u32)))
+mve_pred16_t __arm_vcmpneq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u32)))
+mve_pred16_t __arm_vcmpneq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u8)))
+mve_pred16_t __arm_vcmpneq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u8)))
+mve_pred16_t __arm_vcmpneq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_s16)))
+int16x8_t __arm_vcreateq_s16(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_s32)))
+int32x4_t __arm_vcreateq_s32(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_s64)))
+int64x2_t __arm_vcreateq_s64(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_s8)))
+int8x16_t __arm_vcreateq_s8(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_u16)))
+uint16x8_t __arm_vcreateq_u16(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_u32)))
+uint32x4_t __arm_vcreateq_u32(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_u64)))
+uint64x2_t __arm_vcreateq_u64(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_u8)))
+uint8x16_t __arm_vcreateq_u8(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s16)))
+int16_t __arm_vgetq_lane_s16(int16x8_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s16)))
+int16_t __arm_vgetq_lane(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s32)))
+int32_t __arm_vgetq_lane_s32(int32x4_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s32)))
+int32_t __arm_vgetq_lane(int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s64)))
+int64_t __arm_vgetq_lane_s64(int64x2_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s64)))
+int64_t __arm_vgetq_lane(int64x2_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s8)))
+int8_t __arm_vgetq_lane_s8(int8x16_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s8)))
+int8_t __arm_vgetq_lane(int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u16)))
+uint16_t __arm_vgetq_lane_u16(uint16x8_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u16)))
+uint16_t __arm_vgetq_lane(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u32)))
+uint32_t __arm_vgetq_lane_u32(uint32x4_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u32)))
+uint32_t __arm_vgetq_lane(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u64)))
+uint64_t __arm_vgetq_lane_u64(uint64x2_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u64)))
+uint64_t __arm_vgetq_lane(uint64x2_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u8)))
+uint8_t __arm_vgetq_lane_u8(uint8x16_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u8)))
+uint8_t __arm_vgetq_lane(uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_s16)))
+int16x8_t __arm_vld1q_s16(const int16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_s16)))
+int16x8_t __arm_vld1q(const int16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_s32)))
+int32x4_t __arm_vld1q_s32(const int32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_s32)))
+int32x4_t __arm_vld1q(const int32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_s8)))
+int8x16_t __arm_vld1q_s8(const int8_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_s8)))
+int8x16_t __arm_vld1q(const int8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_u16)))
+uint16x8_t __arm_vld1q_u16(const uint16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_u16)))
+uint16x8_t __arm_vld1q(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_u32)))
+uint32x4_t __arm_vld1q_u32(const uint32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_u32)))
+uint32x4_t __arm_vld1q(const uint32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_u8)))
+uint8x16_t __arm_vld1q_u8(const uint8_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_u8)))
+uint8x16_t __arm_vld1q(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s16)))
+int16x8_t __arm_vld1q_z_s16(const int16_t *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s16)))
+int16x8_t __arm_vld1q_z(const int16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s32)))
+int32x4_t __arm_vld1q_z_s32(const int32_t *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s32)))
+int32x4_t __arm_vld1q_z(const int32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s8)))
+int8x16_t __arm_vld1q_z_s8(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s8)))
+int8x16_t __arm_vld1q_z(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u16)))
+uint16x8_t __arm_vld1q_z_u16(const uint16_t *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u16)))
+uint16x8_t __arm_vld1q_z(const uint16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u32)))
+uint32x4_t __arm_vld1q_z_u32(const uint32_t *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u32)))
+uint32x4_t __arm_vld1q_z(const uint32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u8)))
+uint8x16_t __arm_vld1q_z_u8(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u8)))
+uint8x16_t __arm_vld1q_z(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_s16)))
+int16x8x2_t __arm_vld2q_s16(const int16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_s16)))
+int16x8x2_t __arm_vld2q(const int16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_s32)))
+int32x4x2_t __arm_vld2q_s32(const int32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_s32)))
+int32x4x2_t __arm_vld2q(const int32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_s8)))
+int8x16x2_t __arm_vld2q_s8(const int8_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_s8)))
+int8x16x2_t __arm_vld2q(const int8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_u16)))
+uint16x8x2_t __arm_vld2q_u16(const uint16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_u16)))
+uint16x8x2_t __arm_vld2q(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_u32)))
+uint32x4x2_t __arm_vld2q_u32(const uint32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_u32)))
+uint32x4x2_t __arm_vld2q(const uint32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_u8)))
+uint8x16x2_t __arm_vld2q_u8(const uint8_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_u8)))
+uint8x16x2_t __arm_vld2q(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_s16)))
+int16x8x4_t __arm_vld4q_s16(const int16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_s16)))
+int16x8x4_t __arm_vld4q(const int16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_s32)))
+int32x4x4_t __arm_vld4q_s32(const int32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_s32)))
+int32x4x4_t __arm_vld4q(const int32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_s8)))
+int8x16x4_t __arm_vld4q_s8(const int8_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_s8)))
+int8x16x4_t __arm_vld4q(const int8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_u16)))
+uint16x8x4_t __arm_vld4q_u16(const uint16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_u16)))
+uint16x8x4_t __arm_vld4q(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_u32)))
+uint32x4x4_t __arm_vld4q_u32(const uint32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_u32)))
+uint32x4x4_t __arm_vld4q(const uint32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_u8)))
+uint8x16x4_t __arm_vld4q_u8(const uint8_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_u8)))
+uint8x16x4_t __arm_vld4q(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s16)))
+int16x8_t __arm_vldrbq_gather_offset_s16(const int8_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s16)))
+int16x8_t __arm_vldrbq_gather_offset(const int8_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s32)))
+int32x4_t __arm_vldrbq_gather_offset_s32(const int8_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s32)))
+int32x4_t __arm_vldrbq_gather_offset(const int8_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s8)))
+int8x16_t __arm_vldrbq_gather_offset_s8(const int8_t *, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s8)))
+int8x16_t __arm_vldrbq_gather_offset(const int8_t *, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u16)))
+uint16x8_t __arm_vldrbq_gather_offset_u16(const uint8_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u16)))
+uint16x8_t __arm_vldrbq_gather_offset(const uint8_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u32)))
+uint32x4_t __arm_vldrbq_gather_offset_u32(const uint8_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u32)))
+uint32x4_t __arm_vldrbq_gather_offset(const uint8_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u8)))
+uint8x16_t __arm_vldrbq_gather_offset_u8(const uint8_t *, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u8)))
+uint8x16_t __arm_vldrbq_gather_offset(const uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16)))
+int16x8_t __arm_vldrbq_gather_offset_z_s16(const int8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16)))
+int16x8_t __arm_vldrbq_gather_offset_z(const int8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32)))
+int32x4_t __arm_vldrbq_gather_offset_z_s32(const int8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32)))
+int32x4_t __arm_vldrbq_gather_offset_z(const int8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8)))
+int8x16_t __arm_vldrbq_gather_offset_z_s8(const int8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8)))
+int8x16_t __arm_vldrbq_gather_offset_z(const int8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16)))
+uint16x8_t __arm_vldrbq_gather_offset_z_u16(const uint8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16)))
+uint16x8_t __arm_vldrbq_gather_offset_z(const uint8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32)))
+uint32x4_t __arm_vldrbq_gather_offset_z_u32(const uint8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32)))
+uint32x4_t __arm_vldrbq_gather_offset_z(const uint8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8)))
+uint8x16_t __arm_vldrbq_gather_offset_z_u8(const uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8)))
+uint8x16_t __arm_vldrbq_gather_offset_z(const uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_s16)))
+int16x8_t __arm_vldrbq_s16(const int8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_s32)))
+int32x4_t __arm_vldrbq_s32(const int8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_s8)))
+int8x16_t __arm_vldrbq_s8(const int8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_u16)))
+uint16x8_t __arm_vldrbq_u16(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_u32)))
+uint32x4_t __arm_vldrbq_u32(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_u8)))
+uint8x16_t __arm_vldrbq_u8(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_s16)))
+int16x8_t __arm_vldrbq_z_s16(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_s32)))
+int32x4_t __arm_vldrbq_z_s32(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_s8)))
+int8x16_t __arm_vldrbq_z_s8(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_u16)))
+uint16x8_t __arm_vldrbq_z_u16(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_u32)))
+uint32x4_t __arm_vldrbq_z_u32(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_u8)))
+uint8x16_t __arm_vldrbq_z_u8(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_s64)))
+int64x2_t __arm_vldrdq_gather_base_s64(uint64x2_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_u64)))
+uint64x2_t __arm_vldrdq_gather_base_u64(uint64x2_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_wb_s64)))
+int64x2_t __arm_vldrdq_gather_base_wb_s64(uint64x2_t *, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_wb_u64)))
+uint64x2_t __arm_vldrdq_gather_base_wb_u64(uint64x2_t *, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_s64)))
+int64x2_t __arm_vldrdq_gather_base_wb_z_s64(uint64x2_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_u64)))
+uint64x2_t __arm_vldrdq_gather_base_wb_z_u64(uint64x2_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_z_s64)))
+int64x2_t __arm_vldrdq_gather_base_z_s64(uint64x2_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_z_u64)))
+uint64x2_t __arm_vldrdq_gather_base_z_u64(uint64x2_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_s64)))
+int64x2_t __arm_vldrdq_gather_offset_s64(const int64_t *, uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_s64)))
+int64x2_t __arm_vldrdq_gather_offset(const int64_t *, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_u64)))
+uint64x2_t __arm_vldrdq_gather_offset_u64(const uint64_t *, uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_u64)))
+uint64x2_t __arm_vldrdq_gather_offset(const uint64_t *, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64)))
+int64x2_t __arm_vldrdq_gather_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64)))
+int64x2_t __arm_vldrdq_gather_offset_z(const int64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64)))
+uint64x2_t __arm_vldrdq_gather_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64)))
+uint64x2_t __arm_vldrdq_gather_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64)))
+int64x2_t __arm_vldrdq_gather_shifted_offset_s64(const int64_t *, uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64)))
+int64x2_t __arm_vldrdq_gather_shifted_offset(const int64_t *, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64)))
+uint64x2_t __arm_vldrdq_gather_shifted_offset_u64(const uint64_t *, uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64)))
+uint64x2_t __arm_vldrdq_gather_shifted_offset(const uint64_t *, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64)))
+int64x2_t __arm_vldrdq_gather_shifted_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64)))
+int64x2_t __arm_vldrdq_gather_shifted_offset_z(const int64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64)))
+uint64x2_t __arm_vldrdq_gather_shifted_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64)))
+uint64x2_t __arm_vldrdq_gather_shifted_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_s16)))
+int16x8_t __arm_vldrhq_gather_offset_s16(const int16_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_s16)))
+int16x8_t __arm_vldrhq_gather_offset(const int16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_s32)))
+int32x4_t __arm_vldrhq_gather_offset_s32(const int16_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_s32)))
+int32x4_t __arm_vldrhq_gather_offset(const int16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_u16)))
+uint16x8_t __arm_vldrhq_gather_offset_u16(const uint16_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_u16)))
+uint16x8_t __arm_vldrhq_gather_offset(const uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_u32)))
+uint32x4_t __arm_vldrhq_gather_offset_u32(const uint16_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_u32)))
+uint32x4_t __arm_vldrhq_gather_offset(const uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16)))
+int16x8_t __arm_vldrhq_gather_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16)))
+int16x8_t __arm_vldrhq_gather_offset_z(const int16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32)))
+int32x4_t __arm_vldrhq_gather_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32)))
+int32x4_t __arm_vldrhq_gather_offset_z(const int16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16)))
+uint16x8_t __arm_vldrhq_gather_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16)))
+uint16x8_t __arm_vldrhq_gather_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32)))
+uint32x4_t __arm_vldrhq_gather_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32)))
+uint32x4_t __arm_vldrhq_gather_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16)))
+int16x8_t __arm_vldrhq_gather_shifted_offset_s16(const int16_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16)))
+int16x8_t __arm_vldrhq_gather_shifted_offset(const int16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32)))
+int32x4_t __arm_vldrhq_gather_shifted_offset_s32(const int16_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32)))
+int32x4_t __arm_vldrhq_gather_shifted_offset(const int16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16)))
+uint16x8_t __arm_vldrhq_gather_shifted_offset_u16(const uint16_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16)))
+uint16x8_t __arm_vldrhq_gather_shifted_offset(const uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32)))
+uint32x4_t __arm_vldrhq_gather_shifted_offset_u32(const uint16_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32)))
+uint32x4_t __arm_vldrhq_gather_shifted_offset(const uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16)))
+int16x8_t __arm_vldrhq_gather_shifted_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16)))
+int16x8_t __arm_vldrhq_gather_shifted_offset_z(const int16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32)))
+int32x4_t __arm_vldrhq_gather_shifted_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32)))
+int32x4_t __arm_vldrhq_gather_shifted_offset_z(const int16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16)))
+uint16x8_t __arm_vldrhq_gather_shifted_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16)))
+uint16x8_t __arm_vldrhq_gather_shifted_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32)))
+uint32x4_t __arm_vldrhq_gather_shifted_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32)))
+uint32x4_t __arm_vldrhq_gather_shifted_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_s16)))
+int16x8_t __arm_vldrhq_s16(const int16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_s32)))
+int32x4_t __arm_vldrhq_s32(const int16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_u16)))
+uint16x8_t __arm_vldrhq_u16(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_u32)))
+uint32x4_t __arm_vldrhq_u32(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_s16)))
+int16x8_t __arm_vldrhq_z_s16(const int16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_s32)))
+int32x4_t __arm_vldrhq_z_s32(const int16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_u16)))
+uint16x8_t __arm_vldrhq_z_u16(const uint16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_u32)))
+uint32x4_t __arm_vldrhq_z_u32(const uint16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_s32)))
+int32x4_t __arm_vldrwq_gather_base_s32(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_u32)))
+uint32x4_t __arm_vldrwq_gather_base_u32(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_s32)))
+int32x4_t __arm_vldrwq_gather_base_wb_s32(uint32x4_t *, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_u32)))
+uint32x4_t __arm_vldrwq_gather_base_wb_u32(uint32x4_t *, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_s32)))
+int32x4_t __arm_vldrwq_gather_base_wb_z_s32(uint32x4_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_u32)))
+uint32x4_t __arm_vldrwq_gather_base_wb_z_u32(uint32x4_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_z_s32)))
+int32x4_t __arm_vldrwq_gather_base_z_s32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_z_u32)))
+uint32x4_t __arm_vldrwq_gather_base_z_u32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_s32)))
+int32x4_t __arm_vldrwq_gather_offset_s32(const int32_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_s32)))
+int32x4_t __arm_vldrwq_gather_offset(const int32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_u32)))
+uint32x4_t __arm_vldrwq_gather_offset_u32(const uint32_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_u32)))
+uint32x4_t __arm_vldrwq_gather_offset(const uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32)))
+int32x4_t __arm_vldrwq_gather_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32)))
+int32x4_t __arm_vldrwq_gather_offset_z(const int32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32)))
+uint32x4_t __arm_vldrwq_gather_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32)))
+uint32x4_t __arm_vldrwq_gather_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32)))
+int32x4_t __arm_vldrwq_gather_shifted_offset_s32(const int32_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32)))
+int32x4_t __arm_vldrwq_gather_shifted_offset(const int32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32)))
+uint32x4_t __arm_vldrwq_gather_shifted_offset_u32(const uint32_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32)))
+uint32x4_t __arm_vldrwq_gather_shifted_offset(const uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32)))
+int32x4_t __arm_vldrwq_gather_shifted_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32)))
+int32x4_t __arm_vldrwq_gather_shifted_offset_z(const int32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32)))
+uint32x4_t __arm_vldrwq_gather_shifted_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32)))
+uint32x4_t __arm_vldrwq_gather_shifted_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_s32)))
+int32x4_t __arm_vldrwq_s32(const int32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_u32)))
+uint32x4_t __arm_vldrwq_u32(const uint32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_z_s32)))
+int32x4_t __arm_vldrwq_z_s32(const int32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_z_u32)))
+uint32x4_t __arm_vldrwq_z_u32(const uint32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s16)))
+int16_t __arm_vmaxvq_s16(int16_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s16)))
+int16_t __arm_vmaxvq(int16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s32)))
+int32_t __arm_vmaxvq_s32(int32_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s32)))
+int32_t __arm_vmaxvq(int32_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s8)))
+int8_t __arm_vmaxvq_s8(int8_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s8)))
+int8_t __arm_vmaxvq(int8_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u16)))
+uint16_t __arm_vmaxvq_u16(uint16_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u16)))
+uint16_t __arm_vmaxvq(uint16_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u32)))
+uint32_t __arm_vmaxvq_u32(uint32_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u32)))
+uint32_t __arm_vmaxvq(uint32_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u8)))
+uint8_t __arm_vmaxvq_u8(uint8_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u8)))
+uint8_t __arm_vmaxvq(uint8_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_s16)))
+int16_t __arm_vminvq_s16(int16_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_s16)))
+int16_t __arm_vminvq(int16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_s32)))
+int32_t __arm_vminvq_s32(int32_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_s32)))
+int32_t __arm_vminvq(int32_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_s8)))
+int8_t __arm_vminvq_s8(int8_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_s8)))
+int8_t __arm_vminvq(int8_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_u16)))
+uint16_t __arm_vminvq_u16(uint16_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_u16)))
+uint16_t __arm_vminvq(uint16_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_u32)))
+uint32_t __arm_vminvq_u32(uint32_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_u32)))
+uint32_t __arm_vminvq(uint32_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_u8)))
+uint8_t __arm_vminvq_u8(uint8_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_u8)))
+uint8_t __arm_vminvq(uint8_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s32)))
+int16x8_t __arm_vreinterpretq_s16_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s32)))
+int16x8_t __arm_vreinterpretq_s16(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s64)))
+int16x8_t __arm_vreinterpretq_s16_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s64)))
+int16x8_t __arm_vreinterpretq_s16(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s8)))
+int16x8_t __arm_vreinterpretq_s16_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s8)))
+int16x8_t __arm_vreinterpretq_s16(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u16)))
+int16x8_t __arm_vreinterpretq_s16_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u16)))
+int16x8_t __arm_vreinterpretq_s16(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u32)))
+int16x8_t __arm_vreinterpretq_s16_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u32)))
+int16x8_t __arm_vreinterpretq_s16(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u64)))
+int16x8_t __arm_vreinterpretq_s16_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u64)))
+int16x8_t __arm_vreinterpretq_s16(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u8)))
+int16x8_t __arm_vreinterpretq_s16_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u8)))
+int16x8_t __arm_vreinterpretq_s16(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s16)))
+int32x4_t __arm_vreinterpretq_s32_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s16)))
+int32x4_t __arm_vreinterpretq_s32(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s64)))
+int32x4_t __arm_vreinterpretq_s32_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s64)))
+int32x4_t __arm_vreinterpretq_s32(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s8)))
+int32x4_t __arm_vreinterpretq_s32_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s8)))
+int32x4_t __arm_vreinterpretq_s32(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u16)))
+int32x4_t __arm_vreinterpretq_s32_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u16)))
+int32x4_t __arm_vreinterpretq_s32(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u32)))
+int32x4_t __arm_vreinterpretq_s32_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u32)))
+int32x4_t __arm_vreinterpretq_s32(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u64)))
+int32x4_t __arm_vreinterpretq_s32_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u64)))
+int32x4_t __arm_vreinterpretq_s32(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u8)))
+int32x4_t __arm_vreinterpretq_s32_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u8)))
+int32x4_t __arm_vreinterpretq_s32(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s16)))
+int64x2_t __arm_vreinterpretq_s64_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s16)))
+int64x2_t __arm_vreinterpretq_s64(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s32)))
+int64x2_t __arm_vreinterpretq_s64_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s32)))
+int64x2_t __arm_vreinterpretq_s64(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s8)))
+int64x2_t __arm_vreinterpretq_s64_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s8)))
+int64x2_t __arm_vreinterpretq_s64(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u16)))
+int64x2_t __arm_vreinterpretq_s64_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u16)))
+int64x2_t __arm_vreinterpretq_s64(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u32)))
+int64x2_t __arm_vreinterpretq_s64_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u32)))
+int64x2_t __arm_vreinterpretq_s64(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u64)))
+int64x2_t __arm_vreinterpretq_s64_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u64)))
+int64x2_t __arm_vreinterpretq_s64(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u8)))
+int64x2_t __arm_vreinterpretq_s64_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u8)))
+int64x2_t __arm_vreinterpretq_s64(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s16)))
+int8x16_t __arm_vreinterpretq_s8_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s16)))
+int8x16_t __arm_vreinterpretq_s8(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s32)))
+int8x16_t __arm_vreinterpretq_s8_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s32)))
+int8x16_t __arm_vreinterpretq_s8(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s64)))
+int8x16_t __arm_vreinterpretq_s8_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s64)))
+int8x16_t __arm_vreinterpretq_s8(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u16)))
+int8x16_t __arm_vreinterpretq_s8_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u16)))
+int8x16_t __arm_vreinterpretq_s8(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u32)))
+int8x16_t __arm_vreinterpretq_s8_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u32)))
+int8x16_t __arm_vreinterpretq_s8(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u64)))
+int8x16_t __arm_vreinterpretq_s8_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u64)))
+int8x16_t __arm_vreinterpretq_s8(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u8)))
+int8x16_t __arm_vreinterpretq_s8_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u8)))
+int8x16_t __arm_vreinterpretq_s8(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s16)))
+uint16x8_t __arm_vreinterpretq_u16_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s16)))
+uint16x8_t __arm_vreinterpretq_u16(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s32)))
+uint16x8_t __arm_vreinterpretq_u16_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s32)))
+uint16x8_t __arm_vreinterpretq_u16(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s64)))
+uint16x8_t __arm_vreinterpretq_u16_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s64)))
+uint16x8_t __arm_vreinterpretq_u16(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s8)))
+uint16x8_t __arm_vreinterpretq_u16_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s8)))
+uint16x8_t __arm_vreinterpretq_u16(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u32)))
+uint16x8_t __arm_vreinterpretq_u16_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u32)))
+uint16x8_t __arm_vreinterpretq_u16(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u64)))
+uint16x8_t __arm_vreinterpretq_u16_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u64)))
+uint16x8_t __arm_vreinterpretq_u16(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u8)))
+uint16x8_t __arm_vreinterpretq_u16_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u8)))
+uint16x8_t __arm_vreinterpretq_u16(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s16)))
+uint32x4_t __arm_vreinterpretq_u32_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s16)))
+uint32x4_t __arm_vreinterpretq_u32(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s32)))
+uint32x4_t __arm_vreinterpretq_u32_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s32)))
+uint32x4_t __arm_vreinterpretq_u32(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s64)))
+uint32x4_t __arm_vreinterpretq_u32_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s64)))
+uint32x4_t __arm_vreinterpretq_u32(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s8)))
+uint32x4_t __arm_vreinterpretq_u32_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s8)))
+uint32x4_t __arm_vreinterpretq_u32(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u16)))
+uint32x4_t __arm_vreinterpretq_u32_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u16)))
+uint32x4_t __arm_vreinterpretq_u32(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u64)))
+uint32x4_t __arm_vreinterpretq_u32_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u64)))
+uint32x4_t __arm_vreinterpretq_u32(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u8)))
+uint32x4_t __arm_vreinterpretq_u32_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u8)))
+uint32x4_t __arm_vreinterpretq_u32(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s16)))
+uint64x2_t __arm_vreinterpretq_u64_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s16)))
+uint64x2_t __arm_vreinterpretq_u64(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s32)))
+uint64x2_t __arm_vreinterpretq_u64_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s32)))
+uint64x2_t __arm_vreinterpretq_u64(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s64)))
+uint64x2_t __arm_vreinterpretq_u64_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s64)))
+uint64x2_t __arm_vreinterpretq_u64(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s8)))
+uint64x2_t __arm_vreinterpretq_u64_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s8)))
+uint64x2_t __arm_vreinterpretq_u64(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u16)))
+uint64x2_t __arm_vreinterpretq_u64_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u16)))
+uint64x2_t __arm_vreinterpretq_u64(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u32)))
+uint64x2_t __arm_vreinterpretq_u64_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u32)))
+uint64x2_t __arm_vreinterpretq_u64(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u8)))
+uint64x2_t __arm_vreinterpretq_u64_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u8)))
+uint64x2_t __arm_vreinterpretq_u64(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s16)))
+uint8x16_t __arm_vreinterpretq_u8_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s16)))
+uint8x16_t __arm_vreinterpretq_u8(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s32)))
+uint8x16_t __arm_vreinterpretq_u8_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s32)))
+uint8x16_t __arm_vreinterpretq_u8(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s64)))
+uint8x16_t __arm_vreinterpretq_u8_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s64)))
+uint8x16_t __arm_vreinterpretq_u8(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s8)))
+uint8x16_t __arm_vreinterpretq_u8_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s8)))
+uint8x16_t __arm_vreinterpretq_u8(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u16)))
+uint8x16_t __arm_vreinterpretq_u8_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u16)))
+uint8x16_t __arm_vreinterpretq_u8(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u32)))
+uint8x16_t __arm_vreinterpretq_u8_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u32)))
+uint8x16_t __arm_vreinterpretq_u8(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u64)))
+uint8x16_t __arm_vreinterpretq_u8_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u64)))
+uint8x16_t __arm_vreinterpretq_u8(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s16)))
+int16x8_t __arm_vsetq_lane_s16(int16_t, int16x8_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s16)))
+int16x8_t __arm_vsetq_lane(int16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s32)))
+int32x4_t __arm_vsetq_lane_s32(int32_t, int32x4_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s32)))
+int32x4_t __arm_vsetq_lane(int32_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s64)))
+int64x2_t __arm_vsetq_lane_s64(int64_t, int64x2_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s64)))
+int64x2_t __arm_vsetq_lane(int64_t, int64x2_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s8)))
+int8x16_t __arm_vsetq_lane_s8(int8_t, int8x16_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s8)))
+int8x16_t __arm_vsetq_lane(int8_t, int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u16)))
+uint16x8_t __arm_vsetq_lane_u16(uint16_t, uint16x8_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u16)))
+uint16x8_t __arm_vsetq_lane(uint16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u32)))
+uint32x4_t __arm_vsetq_lane_u32(uint32_t, uint32x4_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u32)))
+uint32x4_t __arm_vsetq_lane(uint32_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u64)))
+uint64x2_t __arm_vsetq_lane_u64(uint64_t, uint64x2_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u64)))
+uint64x2_t __arm_vsetq_lane(uint64_t, uint64x2_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u8)))
+uint8x16_t __arm_vsetq_lane_u8(uint8_t, uint8x16_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u8)))
+uint8x16_t __arm_vsetq_lane(uint8_t, uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s16)))
+void __arm_vst1q_p_s16(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s16)))
+void __arm_vst1q_p(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s32)))
+void __arm_vst1q_p_s32(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s32)))
+void __arm_vst1q_p(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s8)))
+void __arm_vst1q_p_s8(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s8)))
+void __arm_vst1q_p(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u16)))
+void __arm_vst1q_p_u16(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u16)))
+void __arm_vst1q_p(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u32)))
+void __arm_vst1q_p_u32(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u32)))
+void __arm_vst1q_p(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u8)))
+void __arm_vst1q_p_u8(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u8)))
+void __arm_vst1q_p(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_s16)))
+void __arm_vst1q_s16(int16_t *, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_s16)))
+void __arm_vst1q(int16_t *, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_s32)))
+void __arm_vst1q_s32(int32_t *, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_s32)))
+void __arm_vst1q(int32_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_s8)))
+void __arm_vst1q_s8(int8_t *, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_s8)))
+void __arm_vst1q(int8_t *, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_u16)))
+void __arm_vst1q_u16(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_u16)))
+void __arm_vst1q(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_u32)))
+void __arm_vst1q_u32(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_u32)))
+void __arm_vst1q(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_u8)))
+void __arm_vst1q_u8(uint8_t *, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_u8)))
+void __arm_vst1q(uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_s16)))
+void __arm_vst2q_s16(int16_t *, int16x8x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_s16)))
+void __arm_vst2q(int16_t *, int16x8x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_s32)))
+void __arm_vst2q_s32(int32_t *, int32x4x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_s32)))
+void __arm_vst2q(int32_t *, int32x4x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_s8)))
+void __arm_vst2q_s8(int8_t *, int8x16x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_s8)))
+void __arm_vst2q(int8_t *, int8x16x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_u16)))
+void __arm_vst2q_u16(uint16_t *, uint16x8x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_u16)))
+void __arm_vst2q(uint16_t *, uint16x8x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_u32)))
+void __arm_vst2q_u32(uint32_t *, uint32x4x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_u32)))
+void __arm_vst2q(uint32_t *, uint32x4x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_u8)))
+void __arm_vst2q_u8(uint8_t *, uint8x16x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_u8)))
+void __arm_vst2q(uint8_t *, uint8x16x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_s16)))
+void __arm_vst4q_s16(int16_t *, int16x8x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_s16)))
+void __arm_vst4q(int16_t *, int16x8x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_s32)))
+void __arm_vst4q_s32(int32_t *, int32x4x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_s32)))
+void __arm_vst4q(int32_t *, int32x4x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_s8)))
+void __arm_vst4q_s8(int8_t *, int8x16x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_s8)))
+void __arm_vst4q(int8_t *, int8x16x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_u16)))
+void __arm_vst4q_u16(uint16_t *, uint16x8x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_u16)))
+void __arm_vst4q(uint16_t *, uint16x8x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_u32)))
+void __arm_vst4q_u32(uint32_t *, uint32x4x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_u32)))
+void __arm_vst4q(uint32_t *, uint32x4x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_u8)))
+void __arm_vst4q_u8(uint8_t *, uint8x16x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_u8)))
+void __arm_vst4q(uint8_t *, uint8x16x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s16)))
+void __arm_vstrbq_p_s16(int8_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s16)))
+void __arm_vstrbq_p(int8_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s32)))
+void __arm_vstrbq_p_s32(int8_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s32)))
+void __arm_vstrbq_p(int8_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s8)))
+void __arm_vstrbq_p_s8(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s8)))
+void __arm_vstrbq_p(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u16)))
+void __arm_vstrbq_p_u16(uint8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u16)))
+void __arm_vstrbq_p(uint8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u32)))
+void __arm_vstrbq_p_u32(uint8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u32)))
+void __arm_vstrbq_p(uint8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u8)))
+void __arm_vstrbq_p_u8(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u8)))
+void __arm_vstrbq_p(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s16)))
+void __arm_vstrbq_s16(int8_t *, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s16)))
+void __arm_vstrbq(int8_t *, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s32)))
+void __arm_vstrbq_s32(int8_t *, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s32)))
+void __arm_vstrbq(int8_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s8)))
+void __arm_vstrbq_s8(int8_t *, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s8)))
+void __arm_vstrbq(int8_t *, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16)))
+void __arm_vstrbq_scatter_offset_p_s16(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16)))
+void __arm_vstrbq_scatter_offset_p(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32)))
+void __arm_vstrbq_scatter_offset_p_s32(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32)))
+void __arm_vstrbq_scatter_offset_p(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8)))
+void __arm_vstrbq_scatter_offset_p_s8(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8)))
+void __arm_vstrbq_scatter_offset_p(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16)))
+void __arm_vstrbq_scatter_offset_p_u16(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16)))
+void __arm_vstrbq_scatter_offset_p(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32)))
+void __arm_vstrbq_scatter_offset_p_u32(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32)))
+void __arm_vstrbq_scatter_offset_p(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8)))
+void __arm_vstrbq_scatter_offset_p_u8(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8)))
+void __arm_vstrbq_scatter_offset_p(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16)))
+void __arm_vstrbq_scatter_offset_s16(int8_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16)))
+void __arm_vstrbq_scatter_offset(int8_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32)))
+void __arm_vstrbq_scatter_offset_s32(int8_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32)))
+void __arm_vstrbq_scatter_offset(int8_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8)))
+void __arm_vstrbq_scatter_offset_s8(int8_t *, uint8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8)))
+void __arm_vstrbq_scatter_offset(int8_t *, uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16)))
+void __arm_vstrbq_scatter_offset_u16(uint8_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16)))
+void __arm_vstrbq_scatter_offset(uint8_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32)))
+void __arm_vstrbq_scatter_offset_u32(uint8_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32)))
+void __arm_vstrbq_scatter_offset(uint8_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8)))
+void __arm_vstrbq_scatter_offset_u8(uint8_t *, uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8)))
+void __arm_vstrbq_scatter_offset(uint8_t *, uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u16)))
+void __arm_vstrbq_u16(uint8_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u16)))
+void __arm_vstrbq(uint8_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u32)))
+void __arm_vstrbq_u32(uint8_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u32)))
+void __arm_vstrbq(uint8_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u8)))
+void __arm_vstrbq_u8(uint8_t *, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u8)))
+void __arm_vstrbq(uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64)))
+void __arm_vstrdq_scatter_base_p_s64(uint64x2_t, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64)))
+void __arm_vstrdq_scatter_base_p(uint64x2_t, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64)))
+void __arm_vstrdq_scatter_base_p_u64(uint64x2_t, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64)))
+void __arm_vstrdq_scatter_base_p(uint64x2_t, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_s64)))
+void __arm_vstrdq_scatter_base_s64(uint64x2_t, int, int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_s64)))
+void __arm_vstrdq_scatter_base(uint64x2_t, int, int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_u64)))
+void __arm_vstrdq_scatter_base_u64(uint64x2_t, int, uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_u64)))
+void __arm_vstrdq_scatter_base(uint64x2_t, int, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64)))
+void __arm_vstrdq_scatter_base_wb_p_s64(uint64x2_t *, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64)))
+void __arm_vstrdq_scatter_base_wb_p(uint64x2_t *, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64)))
+void __arm_vstrdq_scatter_base_wb_p_u64(uint64x2_t *, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64)))
+void __arm_vstrdq_scatter_base_wb_p(uint64x2_t *, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64)))
+void __arm_vstrdq_scatter_base_wb_s64(uint64x2_t *, int, int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64)))
+void __arm_vstrdq_scatter_base_wb(uint64x2_t *, int, int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64)))
+void __arm_vstrdq_scatter_base_wb_u64(uint64x2_t *, int, uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64)))
+void __arm_vstrdq_scatter_base_wb(uint64x2_t *, int, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64)))
+void __arm_vstrdq_scatter_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64)))
+void __arm_vstrdq_scatter_offset_p(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64)))
+void __arm_vstrdq_scatter_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64)))
+void __arm_vstrdq_scatter_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64)))
+void __arm_vstrdq_scatter_offset_s64(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64)))
+void __arm_vstrdq_scatter_offset(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64)))
+void __arm_vstrdq_scatter_offset_u64(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64)))
+void __arm_vstrdq_scatter_offset(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64)))
+void __arm_vstrdq_scatter_shifted_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64)))
+void __arm_vstrdq_scatter_shifted_offset_p(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64)))
+void __arm_vstrdq_scatter_shifted_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64)))
+void __arm_vstrdq_scatter_shifted_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64)))
+void __arm_vstrdq_scatter_shifted_offset_s64(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64)))
+void __arm_vstrdq_scatter_shifted_offset(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64)))
+void __arm_vstrdq_scatter_shifted_offset_u64(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64)))
+void __arm_vstrdq_scatter_shifted_offset(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_s16)))
+void __arm_vstrhq_p_s16(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_s16)))
+void __arm_vstrhq_p(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_s32)))
+void __arm_vstrhq_p_s32(int16_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_s32)))
+void __arm_vstrhq_p(int16_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_u16)))
+void __arm_vstrhq_p_u16(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_u16)))
+void __arm_vstrhq_p(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_u32)))
+void __arm_vstrhq_p_u32(uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_u32)))
+void __arm_vstrhq_p(uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_s16)))
+void __arm_vstrhq_s16(int16_t *, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_s16)))
+void __arm_vstrhq(int16_t *, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_s32)))
+void __arm_vstrhq_s32(int16_t *, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_s32)))
+void __arm_vstrhq(int16_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16)))
+void __arm_vstrhq_scatter_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16)))
+void __arm_vstrhq_scatter_offset_p(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32)))
+void __arm_vstrhq_scatter_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32)))
+void __arm_vstrhq_scatter_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16)))
+void __arm_vstrhq_scatter_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16)))
+void __arm_vstrhq_scatter_offset_p(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32)))
+void __arm_vstrhq_scatter_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32)))
+void __arm_vstrhq_scatter_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16)))
+void __arm_vstrhq_scatter_offset_s16(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16)))
+void __arm_vstrhq_scatter_offset(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32)))
+void __arm_vstrhq_scatter_offset_s32(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32)))
+void __arm_vstrhq_scatter_offset(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16)))
+void __arm_vstrhq_scatter_offset_u16(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16)))
+void __arm_vstrhq_scatter_offset(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32)))
+void __arm_vstrhq_scatter_offset_u32(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32)))
+void __arm_vstrhq_scatter_offset(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16)))
+void __arm_vstrhq_scatter_shifted_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16)))
+void __arm_vstrhq_scatter_shifted_offset_p(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32)))
+void __arm_vstrhq_scatter_shifted_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32)))
+void __arm_vstrhq_scatter_shifted_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16)))
+void __arm_vstrhq_scatter_shifted_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16)))
+void __arm_vstrhq_scatter_shifted_offset_p(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32)))
+void __arm_vstrhq_scatter_shifted_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32)))
+void __arm_vstrhq_scatter_shifted_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16)))
+void __arm_vstrhq_scatter_shifted_offset_s16(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16)))
+void __arm_vstrhq_scatter_shifted_offset(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32)))
+void __arm_vstrhq_scatter_shifted_offset_s32(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32)))
+void __arm_vstrhq_scatter_shifted_offset(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16)))
+void __arm_vstrhq_scatter_shifted_offset_u16(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16)))
+void __arm_vstrhq_scatter_shifted_offset(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32)))
+void __arm_vstrhq_scatter_shifted_offset_u32(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32)))
+void __arm_vstrhq_scatter_shifted_offset(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_u16)))
+void __arm_vstrhq_u16(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_u16)))
+void __arm_vstrhq(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_u32)))
+void __arm_vstrhq_u32(uint16_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_u32)))
+void __arm_vstrhq(uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_s32)))
+void __arm_vstrwq_p_s32(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_s32)))
+void __arm_vstrwq_p(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_u32)))
+void __arm_vstrwq_p_u32(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_u32)))
+void __arm_vstrwq_p(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_s32)))
+void __arm_vstrwq_s32(int32_t *, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_s32)))
+void __arm_vstrwq(int32_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32)))
+void __arm_vstrwq_scatter_base_p_s32(uint32x4_t, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32)))
+void __arm_vstrwq_scatter_base_p(uint32x4_t, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32)))
+void __arm_vstrwq_scatter_base_p_u32(uint32x4_t, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32)))
+void __arm_vstrwq_scatter_base_p(uint32x4_t, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_s32)))
+void __arm_vstrwq_scatter_base_s32(uint32x4_t, int, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_s32)))
+void __arm_vstrwq_scatter_base(uint32x4_t, int, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_u32)))
+void __arm_vstrwq_scatter_base_u32(uint32x4_t, int, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_u32)))
+void __arm_vstrwq_scatter_base(uint32x4_t, int, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32)))
+void __arm_vstrwq_scatter_base_wb_p_s32(uint32x4_t *, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32)))
+void __arm_vstrwq_scatter_base_wb_p(uint32x4_t *, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32)))
+void __arm_vstrwq_scatter_base_wb_p_u32(uint32x4_t *, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32)))
+void __arm_vstrwq_scatter_base_wb_p(uint32x4_t *, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32)))
+void __arm_vstrwq_scatter_base_wb_s32(uint32x4_t *, int, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32)))
+void __arm_vstrwq_scatter_base_wb(uint32x4_t *, int, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32)))
+void __arm_vstrwq_scatter_base_wb_u32(uint32x4_t *, int, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32)))
+void __arm_vstrwq_scatter_base_wb(uint32x4_t *, int, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32)))
+void __arm_vstrwq_scatter_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32)))
+void __arm_vstrwq_scatter_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32)))
+void __arm_vstrwq_scatter_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32)))
+void __arm_vstrwq_scatter_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32)))
+void __arm_vstrwq_scatter_offset_s32(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32)))
+void __arm_vstrwq_scatter_offset(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32)))
+void __arm_vstrwq_scatter_offset_u32(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32)))
+void __arm_vstrwq_scatter_offset(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32)))
+void __arm_vstrwq_scatter_shifted_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32)))
+void __arm_vstrwq_scatter_shifted_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32)))
+void __arm_vstrwq_scatter_shifted_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32)))
+void __arm_vstrwq_scatter_shifted_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32)))
+void __arm_vstrwq_scatter_shifted_offset_s32(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32)))
+void __arm_vstrwq_scatter_shifted_offset(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32)))
+void __arm_vstrwq_scatter_shifted_offset_u32(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32)))
+void __arm_vstrwq_scatter_shifted_offset(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_u32)))
+void __arm_vstrwq_u32(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_u32)))
+void __arm_vstrwq(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s16)))
+int16x8_t __arm_vsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s16)))
+int16x8_t __arm_vsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s32)))
+int32x4_t __arm_vsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s32)))
+int32x4_t __arm_vsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s8)))
+int8x16_t __arm_vsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s8)))
+int8x16_t __arm_vsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u16)))
+uint16x8_t __arm_vsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u16)))
+uint16x8_t __arm_vsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u32)))
+uint32x4_t __arm_vsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u32)))
+uint32x4_t __arm_vsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u8)))
+uint8x16_t __arm_vsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u8)))
+uint8x16_t __arm_vsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_s16)))
+int16x8_t __arm_vsubq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_s16)))
+int16x8_t __arm_vsubq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_s32)))
+int32x4_t __arm_vsubq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_s32)))
+int32x4_t __arm_vsubq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_s8)))
+int8x16_t __arm_vsubq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_s8)))
+int8x16_t __arm_vsubq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_u16)))
+uint16x8_t __arm_vsubq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_u16)))
+uint16x8_t __arm_vsubq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_u32)))
+uint32x4_t __arm_vsubq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_u32)))
+uint32x4_t __arm_vsubq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_u8)))
+uint8x16_t __arm_vsubq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_u8)))
+uint8x16_t __arm_vsubq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s16)))
+int16x8_t __arm_vuninitializedq(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s32)))
+int32x4_t __arm_vuninitializedq(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s64)))
+int64x2_t __arm_vuninitializedq(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s8)))
+int8x16_t __arm_vuninitializedq(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u16)))
+uint16x8_t __arm_vuninitializedq(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u32)))
+uint32x4_t __arm_vuninitializedq(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u64)))
+uint64x2_t __arm_vuninitializedq(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u8)))
+uint8x16_t __arm_vuninitializedq(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_s16)))
+int16x8_t __arm_vuninitializedq_s16();
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_s32)))
+int32x4_t __arm_vuninitializedq_s32();
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_s64)))
+int64x2_t __arm_vuninitializedq_s64();
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_s8)))
+int8x16_t __arm_vuninitializedq_s8();
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_u16)))
+uint16x8_t __arm_vuninitializedq_u16();
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_u32)))
+uint32x4_t __arm_vuninitializedq_u32();
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_u64)))
+uint64x2_t __arm_vuninitializedq_u64();
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_u8)))
+uint8x16_t __arm_vuninitializedq_u8();
+
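+/*
+ * Usage sketch (illustrative only; sub_masked is a hypothetical caller, not a
+ * declaration from this header): combining the predicated subtract and the
+ * 32-bit store intrinsics declared above, assuming an MVE-enabled Cortex-M
+ * target and a translation unit that includes <arm_mve.h>.
+ *
+ *   static int32x4_t sub_masked(int32_t *dst, int32x4_t a, int32x4_t b,
+ *                               mve_pred16_t p)
+ *   {
+ *       // Lanes enabled by p get a - b; disabled lanes keep a (the
+ *       // "inactive" first argument of the _m form).
+ *       int32x4_t r = __arm_vsubq_m(a, a, b, p);
+ *       // Unpredicated 32-bit store of the full result vector.
+ *       __arm_vstrwq(dst, r);
+ *       return r;
+ *   }
+ */
+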
+#if (__ARM_FEATURE_MVE & 2)
+
+typedef __fp16 float16_t;
+typedef float float32_t;
+typedef __attribute__((neon_vector_type(8))) float16_t float16x8_t;
+typedef struct { float16x8_t val[2]; } float16x8x2_t;
+typedef struct { float16x8_t val[4]; } float16x8x4_t;
+typedef __attribute__((neon_vector_type(4))) float32_t float32x4_t;
+typedef struct { float32x4_t val[2]; } float32x4x2_t;
+typedef struct { float32x4_t val[4]; } float32x4x4_t;
+
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_f16)))
+float16x8_t __arm_vaddq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_f16)))
+float16x8_t __arm_vaddq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_f32)))
+float32x4_t __arm_vaddq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_f32)))
+float32x4_t __arm_vaddq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_f16)))
+float16x8_t __arm_vaddq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_f16)))
+float16x8_t __arm_vaddq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_f32)))
+float32x4_t __arm_vaddq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_f32)))
+float32x4_t __arm_vaddq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_f16)))
+mve_pred16_t __arm_vcmpeqq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_f16)))
+mve_pred16_t __arm_vcmpeqq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_f32)))
+mve_pred16_t __arm_vcmpeqq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_f32)))
+mve_pred16_t __arm_vcmpeqq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_f16)))
+mve_pred16_t __arm_vcmpeqq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_f16)))
+mve_pred16_t __arm_vcmpeqq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_f32)))
+mve_pred16_t __arm_vcmpeqq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_f32)))
+mve_pred16_t __arm_vcmpeqq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_f16)))
+mve_pred16_t __arm_vcmpeqq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_f16)))
+mve_pred16_t __arm_vcmpeqq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_f32)))
+mve_pred16_t __arm_vcmpeqq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_f32)))
+mve_pred16_t __arm_vcmpeqq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_f16)))
+mve_pred16_t __arm_vcmpeqq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_f16)))
+mve_pred16_t __arm_vcmpeqq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_f32)))
+mve_pred16_t __arm_vcmpeqq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_f32)))
+mve_pred16_t __arm_vcmpeqq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_f16)))
+mve_pred16_t __arm_vcmpgeq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_f16)))
+mve_pred16_t __arm_vcmpgeq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_f32)))
+mve_pred16_t __arm_vcmpgeq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_f32)))
+mve_pred16_t __arm_vcmpgeq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_f16)))
+mve_pred16_t __arm_vcmpgeq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_f16)))
+mve_pred16_t __arm_vcmpgeq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_f32)))
+mve_pred16_t __arm_vcmpgeq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_f32)))
+mve_pred16_t __arm_vcmpgeq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_f16)))
+mve_pred16_t __arm_vcmpgeq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_f16)))
+mve_pred16_t __arm_vcmpgeq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_f32)))
+mve_pred16_t __arm_vcmpgeq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_f32)))
+mve_pred16_t __arm_vcmpgeq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_f16)))
+mve_pred16_t __arm_vcmpgeq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_f16)))
+mve_pred16_t __arm_vcmpgeq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_f32)))
+mve_pred16_t __arm_vcmpgeq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_f32)))
+mve_pred16_t __arm_vcmpgeq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_f16)))
+mve_pred16_t __arm_vcmpgtq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_f16)))
+mve_pred16_t __arm_vcmpgtq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_f32)))
+mve_pred16_t __arm_vcmpgtq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_f32)))
+mve_pred16_t __arm_vcmpgtq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_f16)))
+mve_pred16_t __arm_vcmpgtq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_f16)))
+mve_pred16_t __arm_vcmpgtq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_f32)))
+mve_pred16_t __arm_vcmpgtq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_f32)))
+mve_pred16_t __arm_vcmpgtq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_f16)))
+mve_pred16_t __arm_vcmpgtq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_f16)))
+mve_pred16_t __arm_vcmpgtq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_f32)))
+mve_pred16_t __arm_vcmpgtq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_f32)))
+mve_pred16_t __arm_vcmpgtq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_f16)))
+mve_pred16_t __arm_vcmpgtq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_f16)))
+mve_pred16_t __arm_vcmpgtq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_f32)))
+mve_pred16_t __arm_vcmpgtq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_f32)))
+mve_pred16_t __arm_vcmpgtq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_f16)))
+mve_pred16_t __arm_vcmpleq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_f16)))
+mve_pred16_t __arm_vcmpleq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_f32)))
+mve_pred16_t __arm_vcmpleq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_f32)))
+mve_pred16_t __arm_vcmpleq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_f16)))
+mve_pred16_t __arm_vcmpleq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_f16)))
+mve_pred16_t __arm_vcmpleq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_f32)))
+mve_pred16_t __arm_vcmpleq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_f32)))
+mve_pred16_t __arm_vcmpleq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_f16)))
+mve_pred16_t __arm_vcmpleq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_f16)))
+mve_pred16_t __arm_vcmpleq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_f32)))
+mve_pred16_t __arm_vcmpleq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_f32)))
+mve_pred16_t __arm_vcmpleq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_f16)))
+mve_pred16_t __arm_vcmpleq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_f16)))
+mve_pred16_t __arm_vcmpleq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_f32)))
+mve_pred16_t __arm_vcmpleq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_f32)))
+mve_pred16_t __arm_vcmpleq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_f16)))
+mve_pred16_t __arm_vcmpltq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_f16)))
+mve_pred16_t __arm_vcmpltq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_f32)))
+mve_pred16_t __arm_vcmpltq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_f32)))
+mve_pred16_t __arm_vcmpltq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_f16)))
+mve_pred16_t __arm_vcmpltq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_f16)))
+mve_pred16_t __arm_vcmpltq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_f32)))
+mve_pred16_t __arm_vcmpltq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_f32)))
+mve_pred16_t __arm_vcmpltq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_f16)))
+mve_pred16_t __arm_vcmpltq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_f16)))
+mve_pred16_t __arm_vcmpltq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_f32)))
+mve_pred16_t __arm_vcmpltq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_f32)))
+mve_pred16_t __arm_vcmpltq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_f16)))
+mve_pred16_t __arm_vcmpltq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_f16)))
+mve_pred16_t __arm_vcmpltq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_f32)))
+mve_pred16_t __arm_vcmpltq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_f32)))
+mve_pred16_t __arm_vcmpltq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_f16)))
+mve_pred16_t __arm_vcmpneq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_f16)))
+mve_pred16_t __arm_vcmpneq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_f32)))
+mve_pred16_t __arm_vcmpneq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_f32)))
+mve_pred16_t __arm_vcmpneq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_f16)))
+mve_pred16_t __arm_vcmpneq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_f16)))
+mve_pred16_t __arm_vcmpneq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_f32)))
+mve_pred16_t __arm_vcmpneq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_f32)))
+mve_pred16_t __arm_vcmpneq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_f16)))
+mve_pred16_t __arm_vcmpneq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_f16)))
+mve_pred16_t __arm_vcmpneq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_f32)))
+mve_pred16_t __arm_vcmpneq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_f32)))
+mve_pred16_t __arm_vcmpneq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_f16)))
+mve_pred16_t __arm_vcmpneq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_f16)))
+mve_pred16_t __arm_vcmpneq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_f32)))
+mve_pred16_t __arm_vcmpneq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_f32)))
+mve_pred16_t __arm_vcmpneq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_f16)))
+float16x8_t __arm_vcreateq_f16(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_f32)))
+float32x4_t __arm_vcreateq_f32(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtbq_f16_f32)))
+float16x8_t __arm_vcvtbq_f16_f32(float16x8_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtbq_m_f16_f32)))
+float16x8_t __arm_vcvtbq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvttq_f16_f32)))
+float16x8_t __arm_vcvttq_f16_f32(float16x8_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvttq_m_f16_f32)))
+float16x8_t __arm_vcvttq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_f16)))
+float16_t __arm_vgetq_lane_f16(float16x8_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_f16)))
+float16_t __arm_vgetq_lane(float16x8_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_f32)))
+float32_t __arm_vgetq_lane_f32(float32x4_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_f32)))
+float32_t __arm_vgetq_lane(float32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_f16)))
+float16x8_t __arm_vld1q_f16(const float16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_f16)))
+float16x8_t __arm_vld1q(const float16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_f32)))
+float32x4_t __arm_vld1q_f32(const float32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_f32)))
+float32x4_t __arm_vld1q(const float32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_f16)))
+float16x8_t __arm_vld1q_z_f16(const float16_t *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_f16)))
+float16x8_t __arm_vld1q_z(const float16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_f32)))
+float32x4_t __arm_vld1q_z_f32(const float32_t *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_f32)))
+float32x4_t __arm_vld1q_z(const float32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_f16)))
+float16x8x2_t __arm_vld2q_f16(const float16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_f16)))
+float16x8x2_t __arm_vld2q(const float16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_f32)))
+float32x4x2_t __arm_vld2q_f32(const float32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_f32)))
+float32x4x2_t __arm_vld2q(const float32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_f16)))
+float16x8x4_t __arm_vld4q_f16(const float16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_f16)))
+float16x8x4_t __arm_vld4q(const float16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_f32)))
+float32x4x4_t __arm_vld4q_f32(const float32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_f32)))
+float32x4x4_t __arm_vld4q(const float32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_f16)))
+float16x8_t __arm_vldrhq_f16(const float16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_f16)))
+float16x8_t __arm_vldrhq_gather_offset_f16(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_f16)))
+float16x8_t __arm_vldrhq_gather_offset(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16)))
+float16x8_t __arm_vldrhq_gather_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16)))
+float16x8_t __arm_vldrhq_gather_offset_z(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16)))
+float16x8_t __arm_vldrhq_gather_shifted_offset_f16(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16)))
+float16x8_t __arm_vldrhq_gather_shifted_offset(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16)))
+float16x8_t __arm_vldrhq_gather_shifted_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16)))
+float16x8_t __arm_vldrhq_gather_shifted_offset_z(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_f16)))
+float16x8_t __arm_vldrhq_z_f16(const float16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_f32)))
+float32x4_t __arm_vldrwq_f32(const float32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_f32)))
+float32x4_t __arm_vldrwq_gather_base_f32(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_f32)))
+float32x4_t __arm_vldrwq_gather_base_wb_f32(uint32x4_t *, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_f32)))
+float32x4_t __arm_vldrwq_gather_base_wb_z_f32(uint32x4_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_z_f32)))
+float32x4_t __arm_vldrwq_gather_base_z_f32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_f32)))
+float32x4_t __arm_vldrwq_gather_offset_f32(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_f32)))
+float32x4_t __arm_vldrwq_gather_offset(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32)))
+float32x4_t __arm_vldrwq_gather_offset_z_f32(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32)))
+float32x4_t __arm_vldrwq_gather_offset_z(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32)))
+float32x4_t __arm_vldrwq_gather_shifted_offset_f32(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32)))
+float32x4_t __arm_vldrwq_gather_shifted_offset(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32)))
+float32x4_t __arm_vldrwq_gather_shifted_offset_z_f32(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32)))
+float32x4_t __arm_vldrwq_gather_shifted_offset_z(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_z_f32)))
+float32x4_t __arm_vldrwq_z_f32(const float32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_f32)))
+float16x8_t __arm_vreinterpretq_f16_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_f32)))
+float16x8_t __arm_vreinterpretq_f16(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s16)))
+float16x8_t __arm_vreinterpretq_f16_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s16)))
+float16x8_t __arm_vreinterpretq_f16(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s32)))
+float16x8_t __arm_vreinterpretq_f16_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s32)))
+float16x8_t __arm_vreinterpretq_f16(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s64)))
+float16x8_t __arm_vreinterpretq_f16_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s64)))
+float16x8_t __arm_vreinterpretq_f16(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s8)))
+float16x8_t __arm_vreinterpretq_f16_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s8)))
+float16x8_t __arm_vreinterpretq_f16(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u16)))
+float16x8_t __arm_vreinterpretq_f16_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u16)))
+float16x8_t __arm_vreinterpretq_f16(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u32)))
+float16x8_t __arm_vreinterpretq_f16_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u32)))
+float16x8_t __arm_vreinterpretq_f16(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u64)))
+float16x8_t __arm_vreinterpretq_f16_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u64)))
+float16x8_t __arm_vreinterpretq_f16(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u8)))
+float16x8_t __arm_vreinterpretq_f16_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u8)))
+float16x8_t __arm_vreinterpretq_f16(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_f16)))
+float32x4_t __arm_vreinterpretq_f32_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_f16)))
+float32x4_t __arm_vreinterpretq_f32(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s16)))
+float32x4_t __arm_vreinterpretq_f32_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s16)))
+float32x4_t __arm_vreinterpretq_f32(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s32)))
+float32x4_t __arm_vreinterpretq_f32_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s32)))
+float32x4_t __arm_vreinterpretq_f32(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s64)))
+float32x4_t __arm_vreinterpretq_f32_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s64)))
+float32x4_t __arm_vreinterpretq_f32(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s8)))
+float32x4_t __arm_vreinterpretq_f32_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s8)))
+float32x4_t __arm_vreinterpretq_f32(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u16)))
+float32x4_t __arm_vreinterpretq_f32_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u16)))
+float32x4_t __arm_vreinterpretq_f32(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u32)))
+float32x4_t __arm_vreinterpretq_f32_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u32)))
+float32x4_t __arm_vreinterpretq_f32(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u64)))
+float32x4_t __arm_vreinterpretq_f32_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u64)))
+float32x4_t __arm_vreinterpretq_f32(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u8)))
+float32x4_t __arm_vreinterpretq_f32_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u8)))
+float32x4_t __arm_vreinterpretq_f32(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_f16)))
+int16x8_t __arm_vreinterpretq_s16_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_f16)))
+int16x8_t __arm_vreinterpretq_s16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_f32)))
+int16x8_t __arm_vreinterpretq_s16_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_f32)))
+int16x8_t __arm_vreinterpretq_s16(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_f16)))
+int32x4_t __arm_vreinterpretq_s32_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_f16)))
+int32x4_t __arm_vreinterpretq_s32(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_f32)))
+int32x4_t __arm_vreinterpretq_s32_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_f32)))
+int32x4_t __arm_vreinterpretq_s32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_f16)))
+int64x2_t __arm_vreinterpretq_s64_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_f16)))
+int64x2_t __arm_vreinterpretq_s64(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_f32)))
+int64x2_t __arm_vreinterpretq_s64_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_f32)))
+int64x2_t __arm_vreinterpretq_s64(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_f16)))
+int8x16_t __arm_vreinterpretq_s8_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_f16)))
+int8x16_t __arm_vreinterpretq_s8(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_f32)))
+int8x16_t __arm_vreinterpretq_s8_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_f32)))
+int8x16_t __arm_vreinterpretq_s8(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_f16)))
+uint16x8_t __arm_vreinterpretq_u16_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_f16)))
+uint16x8_t __arm_vreinterpretq_u16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_f32)))
+uint16x8_t __arm_vreinterpretq_u16_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_f32)))
+uint16x8_t __arm_vreinterpretq_u16(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_f16)))
+uint32x4_t __arm_vreinterpretq_u32_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_f16)))
+uint32x4_t __arm_vreinterpretq_u32(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_f32)))
+uint32x4_t __arm_vreinterpretq_u32_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_f32)))
+uint32x4_t __arm_vreinterpretq_u32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_f16)))
+uint64x2_t __arm_vreinterpretq_u64_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_f16)))
+uint64x2_t __arm_vreinterpretq_u64(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_f32)))
+uint64x2_t __arm_vreinterpretq_u64_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_f32)))
+uint64x2_t __arm_vreinterpretq_u64(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_f16)))
+uint8x16_t __arm_vreinterpretq_u8_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_f16)))
+uint8x16_t __arm_vreinterpretq_u8(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_f32)))
+uint8x16_t __arm_vreinterpretq_u8_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_f32)))
+uint8x16_t __arm_vreinterpretq_u8(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_f16)))
+float16x8_t __arm_vsetq_lane_f16(float16_t, float16x8_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_f16)))
+float16x8_t __arm_vsetq_lane(float16_t, float16x8_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_f32)))
+float32x4_t __arm_vsetq_lane_f32(float32_t, float32x4_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_f32)))
+float32x4_t __arm_vsetq_lane(float32_t, float32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_f16)))
+void __arm_vst1q_f16(float16_t *, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_f16)))
+void __arm_vst1q(float16_t *, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_f32)))
+void __arm_vst1q_f32(float32_t *, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_f32)))
+void __arm_vst1q(float32_t *, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_f16)))
+void __arm_vst1q_p_f16(float16_t *, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_f16)))
+void __arm_vst1q_p(float16_t *, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_f32)))
+void __arm_vst1q_p_f32(float32_t *, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_f32)))
+void __arm_vst1q_p(float32_t *, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_f16)))
+void __arm_vst2q_f16(float16_t *, float16x8x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_f16)))
+void __arm_vst2q(float16_t *, float16x8x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_f32)))
+void __arm_vst2q_f32(float32_t *, float32x4x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_f32)))
+void __arm_vst2q(float32_t *, float32x4x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_f16)))
+void __arm_vst4q_f16(float16_t *, float16x8x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_f16)))
+void __arm_vst4q(float16_t *, float16x8x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_f32)))
+void __arm_vst4q_f32(float32_t *, float32x4x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_f32)))
+void __arm_vst4q(float32_t *, float32x4x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_f16)))
+void __arm_vstrhq_f16(float16_t *, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_f16)))
+void __arm_vstrhq(float16_t *, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_f16)))
+void __arm_vstrhq_p_f16(float16_t *, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_f16)))
+void __arm_vstrhq_p(float16_t *, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16)))
+void __arm_vstrhq_scatter_offset_f16(float16_t *, uint16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16)))
+void __arm_vstrhq_scatter_offset(float16_t *, uint16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16)))
+void __arm_vstrhq_scatter_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16)))
+void __arm_vstrhq_scatter_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16)))
+void __arm_vstrhq_scatter_shifted_offset_f16(float16_t *, uint16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16)))
+void __arm_vstrhq_scatter_shifted_offset(float16_t *, uint16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16)))
+void __arm_vstrhq_scatter_shifted_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16)))
+void __arm_vstrhq_scatter_shifted_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_f32)))
+void __arm_vstrwq_f32(float32_t *, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_f32)))
+void __arm_vstrwq(float32_t *, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_f32)))
+void __arm_vstrwq_p_f32(float32_t *, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_f32)))
+void __arm_vstrwq_p(float32_t *, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_f32)))
+void __arm_vstrwq_scatter_base_f32(uint32x4_t, int, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_f32)))
+void __arm_vstrwq_scatter_base(uint32x4_t, int, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32)))
+void __arm_vstrwq_scatter_base_p_f32(uint32x4_t, int, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32)))
+void __arm_vstrwq_scatter_base_p(uint32x4_t, int, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32)))
+void __arm_vstrwq_scatter_base_wb_f32(uint32x4_t *, int, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32)))
+void __arm_vstrwq_scatter_base_wb(uint32x4_t *, int, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32)))
+void __arm_vstrwq_scatter_base_wb_p_f32(uint32x4_t *, int, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32)))
+void __arm_vstrwq_scatter_base_wb_p(uint32x4_t *, int, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32)))
+void __arm_vstrwq_scatter_offset_f32(float32_t *, uint32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32)))
+void __arm_vstrwq_scatter_offset(float32_t *, uint32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32)))
+void __arm_vstrwq_scatter_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32)))
+void __arm_vstrwq_scatter_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32)))
+void __arm_vstrwq_scatter_shifted_offset_f32(float32_t *, uint32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32)))
+void __arm_vstrwq_scatter_shifted_offset(float32_t *, uint32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32)))
+void __arm_vstrwq_scatter_shifted_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32)))
+void __arm_vstrwq_scatter_shifted_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_f16)))
+float16x8_t __arm_vsubq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_f16)))
+float16x8_t __arm_vsubq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_f32)))
+float32x4_t __arm_vsubq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_f32)))
+float32x4_t __arm_vsubq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_f16)))
+float16x8_t __arm_vsubq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_f16)))
+float16x8_t __arm_vsubq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_f32)))
+float32x4_t __arm_vsubq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_f32)))
+float32x4_t __arm_vsubq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_f16)))
+float16x8_t __arm_vuninitializedq_f16();
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_f32)))
+float32x4_t __arm_vuninitializedq_f32();
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f16)))
+float16x8_t __arm_vuninitializedq(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f32)))
+float32x4_t __arm_vuninitializedq(float32x4_t);
+
+#endif /* (__ARM_FEATURE_MVE & 2) */
+
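+/*
+ * Usage sketch (illustrative only; add_where_positive is a hypothetical
+ * caller, not part of this header): the floating-point compare and predicated
+ * add intrinsics above, assuming (__ARM_FEATURE_MVE & 2) is set, i.e. the
+ * target has MVE with FP support.
+ *
+ *   static float32x4_t add_where_positive(float32x4_t a, float32x4_t b)
+ *   {
+ *       // Per-lane predicate: true where a > 0.0f (vector-by-scalar overload).
+ *       mve_pred16_t p = __arm_vcmpgtq(a, 0.0f);
+ *       // Predicated add: active lanes get a + b, inactive lanes keep a.
+ *       return __arm_vaddq_m(a, a, b, p);
+ *   }
+ */
+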
+#if (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE)
+
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_asrl)))
+int64_t asrl(int64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_lsll)))
+uint64_t lsll(uint64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqrshr)))
+int32_t sqrshr(int32_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqrshrl)))
+int64_t sqrshrl(int64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqrshrl_sat48)))
+int64_t sqrshrl_sat48(int64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqshl)))
+int32_t sqshl(int32_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqshll)))
+int64_t sqshll(int64_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_srshr)))
+int32_t srshr(int32_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_srshrl)))
+int64_t srshrl(int64_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqrshl)))
+uint32_t uqrshl(uint32_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqrshll)))
+uint64_t uqrshll(uint64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqrshll_sat48)))
+uint64_t uqrshll_sat48(uint64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqshl)))
+uint32_t uqshl(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqshll)))
+uint64_t uqshll(uint64_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_urshr)))
+uint32_t urshr(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_urshrl)))
+uint64_t urshrl(uint64_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadciq_m_s32)))
+int32x4_t vadciq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadciq_m_s32)))
+int32x4_t vadciq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadciq_m_u32)))
+uint32x4_t vadciq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadciq_m_u32)))
+uint32x4_t vadciq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadciq_s32)))
+int32x4_t vadciq_s32(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadciq_s32)))
+int32x4_t vadciq(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadciq_u32)))
+uint32x4_t vadciq_u32(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadciq_u32)))
+uint32x4_t vadciq(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadcq_m_s32)))
+int32x4_t vadcq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadcq_m_s32)))
+int32x4_t vadcq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadcq_m_u32)))
+uint32x4_t vadcq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadcq_m_u32)))
+uint32x4_t vadcq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadcq_s32)))
+int32x4_t vadcq_s32(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadcq_s32)))
+int32x4_t vadcq(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadcq_u32)))
+uint32x4_t vadcq_u32(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadcq_u32)))
+uint32x4_t vadcq(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s16)))
+int16x8_t vaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s16)))
+int16x8_t vaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s32)))
+int32x4_t vaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s32)))
+int32x4_t vaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s8)))
+int8x16_t vaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s8)))
+int8x16_t vaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u16)))
+uint16x8_t vaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u16)))
+uint16x8_t vaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u32)))
+uint32x4_t vaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u32)))
+uint32x4_t vaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u8)))
+uint8x16_t vaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u8)))
+uint8x16_t vaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_s16)))
+int16x8_t vaddq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_s16)))
+int16x8_t vaddq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_s32)))
+int32x4_t vaddq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_s32)))
+int32x4_t vaddq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_s8)))
+int8x16_t vaddq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_s8)))
+int8x16_t vaddq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_u16)))
+uint16x8_t vaddq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_u16)))
+uint16x8_t vaddq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_u32)))
+uint32x4_t vaddq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_u32)))
+uint32x4_t vaddq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_u8)))
+uint8x16_t vaddq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_u8)))
+uint8x16_t vaddq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u16)))
+mve_pred16_t vcmpcsq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u16)))
+mve_pred16_t vcmpcsq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u32)))
+mve_pred16_t vcmpcsq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u32)))
+mve_pred16_t vcmpcsq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u8)))
+mve_pred16_t vcmpcsq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u8)))
+mve_pred16_t vcmpcsq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u16)))
+mve_pred16_t vcmpcsq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u16)))
+mve_pred16_t vcmpcsq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u32)))
+mve_pred16_t vcmpcsq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u32)))
+mve_pred16_t vcmpcsq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u8)))
+mve_pred16_t vcmpcsq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u8)))
+mve_pred16_t vcmpcsq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u16)))
+mve_pred16_t vcmpcsq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u16)))
+mve_pred16_t vcmpcsq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u32)))
+mve_pred16_t vcmpcsq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u32)))
+mve_pred16_t vcmpcsq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u8)))
+mve_pred16_t vcmpcsq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u8)))
+mve_pred16_t vcmpcsq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u16)))
+mve_pred16_t vcmpcsq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u16)))
+mve_pred16_t vcmpcsq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u32)))
+mve_pred16_t vcmpcsq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u32)))
+mve_pred16_t vcmpcsq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u8)))
+mve_pred16_t vcmpcsq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u8)))
+mve_pred16_t vcmpcsq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s16)))
+mve_pred16_t vcmpeqq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s16)))
+mve_pred16_t vcmpeqq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s32)))
+mve_pred16_t vcmpeqq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s32)))
+mve_pred16_t vcmpeqq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s8)))
+mve_pred16_t vcmpeqq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s8)))
+mve_pred16_t vcmpeqq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u16)))
+mve_pred16_t vcmpeqq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u16)))
+mve_pred16_t vcmpeqq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u32)))
+mve_pred16_t vcmpeqq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u32)))
+mve_pred16_t vcmpeqq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u8)))
+mve_pred16_t vcmpeqq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u8)))
+mve_pred16_t vcmpeqq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s16)))
+mve_pred16_t vcmpeqq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s16)))
+mve_pred16_t vcmpeqq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s32)))
+mve_pred16_t vcmpeqq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s32)))
+mve_pred16_t vcmpeqq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s8)))
+mve_pred16_t vcmpeqq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s8)))
+mve_pred16_t vcmpeqq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u16)))
+mve_pred16_t vcmpeqq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u16)))
+mve_pred16_t vcmpeqq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u32)))
+mve_pred16_t vcmpeqq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u32)))
+mve_pred16_t vcmpeqq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u8)))
+mve_pred16_t vcmpeqq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u8)))
+mve_pred16_t vcmpeqq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s16)))
+mve_pred16_t vcmpeqq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s16)))
+mve_pred16_t vcmpeqq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s32)))
+mve_pred16_t vcmpeqq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s32)))
+mve_pred16_t vcmpeqq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s8)))
+mve_pred16_t vcmpeqq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s8)))
+mve_pred16_t vcmpeqq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u16)))
+mve_pred16_t vcmpeqq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u16)))
+mve_pred16_t vcmpeqq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u32)))
+mve_pred16_t vcmpeqq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u32)))
+mve_pred16_t vcmpeqq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u8)))
+mve_pred16_t vcmpeqq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u8)))
+mve_pred16_t vcmpeqq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s16)))
+mve_pred16_t vcmpeqq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s16)))
+mve_pred16_t vcmpeqq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s32)))
+mve_pred16_t vcmpeqq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s32)))
+mve_pred16_t vcmpeqq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s8)))
+mve_pred16_t vcmpeqq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s8)))
+mve_pred16_t vcmpeqq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u16)))
+mve_pred16_t vcmpeqq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u16)))
+mve_pred16_t vcmpeqq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u32)))
+mve_pred16_t vcmpeqq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u32)))
+mve_pred16_t vcmpeqq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u8)))
+mve_pred16_t vcmpeqq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u8)))
+mve_pred16_t vcmpeqq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s16)))
+mve_pred16_t vcmpgeq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s16)))
+mve_pred16_t vcmpgeq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s32)))
+mve_pred16_t vcmpgeq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s32)))
+mve_pred16_t vcmpgeq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s8)))
+mve_pred16_t vcmpgeq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s8)))
+mve_pred16_t vcmpgeq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s16)))
+mve_pred16_t vcmpgeq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s16)))
+mve_pred16_t vcmpgeq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s32)))
+mve_pred16_t vcmpgeq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s32)))
+mve_pred16_t vcmpgeq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s8)))
+mve_pred16_t vcmpgeq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s8)))
+mve_pred16_t vcmpgeq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s16)))
+mve_pred16_t vcmpgeq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s16)))
+mve_pred16_t vcmpgeq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s32)))
+mve_pred16_t vcmpgeq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s32)))
+mve_pred16_t vcmpgeq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s8)))
+mve_pred16_t vcmpgeq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s8)))
+mve_pred16_t vcmpgeq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s16)))
+mve_pred16_t vcmpgeq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s16)))
+mve_pred16_t vcmpgeq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s32)))
+mve_pred16_t vcmpgeq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s32)))
+mve_pred16_t vcmpgeq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s8)))
+mve_pred16_t vcmpgeq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s8)))
+mve_pred16_t vcmpgeq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s16)))
+mve_pred16_t vcmpgtq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s16)))
+mve_pred16_t vcmpgtq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s32)))
+mve_pred16_t vcmpgtq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s32)))
+mve_pred16_t vcmpgtq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s8)))
+mve_pred16_t vcmpgtq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s8)))
+mve_pred16_t vcmpgtq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s16)))
+mve_pred16_t vcmpgtq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s16)))
+mve_pred16_t vcmpgtq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s32)))
+mve_pred16_t vcmpgtq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s32)))
+mve_pred16_t vcmpgtq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s8)))
+mve_pred16_t vcmpgtq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s8)))
+mve_pred16_t vcmpgtq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s16)))
+mve_pred16_t vcmpgtq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s16)))
+mve_pred16_t vcmpgtq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s32)))
+mve_pred16_t vcmpgtq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s32)))
+mve_pred16_t vcmpgtq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s8)))
+mve_pred16_t vcmpgtq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s8)))
+mve_pred16_t vcmpgtq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s16)))
+mve_pred16_t vcmpgtq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s16)))
+mve_pred16_t vcmpgtq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s32)))
+mve_pred16_t vcmpgtq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s32)))
+mve_pred16_t vcmpgtq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s8)))
+mve_pred16_t vcmpgtq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s8)))
+mve_pred16_t vcmpgtq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u16)))
+mve_pred16_t vcmphiq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u16)))
+mve_pred16_t vcmphiq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u32)))
+mve_pred16_t vcmphiq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u32)))
+mve_pred16_t vcmphiq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u8)))
+mve_pred16_t vcmphiq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u8)))
+mve_pred16_t vcmphiq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u16)))
+mve_pred16_t vcmphiq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u16)))
+mve_pred16_t vcmphiq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u32)))
+mve_pred16_t vcmphiq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u32)))
+mve_pred16_t vcmphiq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u8)))
+mve_pred16_t vcmphiq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u8)))
+mve_pred16_t vcmphiq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u16)))
+mve_pred16_t vcmphiq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u16)))
+mve_pred16_t vcmphiq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u32)))
+mve_pred16_t vcmphiq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u32)))
+mve_pred16_t vcmphiq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u8)))
+mve_pred16_t vcmphiq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u8)))
+mve_pred16_t vcmphiq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u16)))
+mve_pred16_t vcmphiq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u16)))
+mve_pred16_t vcmphiq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u32)))
+mve_pred16_t vcmphiq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u32)))
+mve_pred16_t vcmphiq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u8)))
+mve_pred16_t vcmphiq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u8)))
+mve_pred16_t vcmphiq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s16)))
+mve_pred16_t vcmpleq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s16)))
+mve_pred16_t vcmpleq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s32)))
+mve_pred16_t vcmpleq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s32)))
+mve_pred16_t vcmpleq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s8)))
+mve_pred16_t vcmpleq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s8)))
+mve_pred16_t vcmpleq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s16)))
+mve_pred16_t vcmpleq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s16)))
+mve_pred16_t vcmpleq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s32)))
+mve_pred16_t vcmpleq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s32)))
+mve_pred16_t vcmpleq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s8)))
+mve_pred16_t vcmpleq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s8)))
+mve_pred16_t vcmpleq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s16)))
+mve_pred16_t vcmpleq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s16)))
+mve_pred16_t vcmpleq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s32)))
+mve_pred16_t vcmpleq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s32)))
+mve_pred16_t vcmpleq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s8)))
+mve_pred16_t vcmpleq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s8)))
+mve_pred16_t vcmpleq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s16)))
+mve_pred16_t vcmpleq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s16)))
+mve_pred16_t vcmpleq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s32)))
+mve_pred16_t vcmpleq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s32)))
+mve_pred16_t vcmpleq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s8)))
+mve_pred16_t vcmpleq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s8)))
+mve_pred16_t vcmpleq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s16)))
+mve_pred16_t vcmpltq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s16)))
+mve_pred16_t vcmpltq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s32)))
+mve_pred16_t vcmpltq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s32)))
+mve_pred16_t vcmpltq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s8)))
+mve_pred16_t vcmpltq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s8)))
+mve_pred16_t vcmpltq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s16)))
+mve_pred16_t vcmpltq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s16)))
+mve_pred16_t vcmpltq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s32)))
+mve_pred16_t vcmpltq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s32)))
+mve_pred16_t vcmpltq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s8)))
+mve_pred16_t vcmpltq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s8)))
+mve_pred16_t vcmpltq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s16)))
+mve_pred16_t vcmpltq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s16)))
+mve_pred16_t vcmpltq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s32)))
+mve_pred16_t vcmpltq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s32)))
+mve_pred16_t vcmpltq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s8)))
+mve_pred16_t vcmpltq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s8)))
+mve_pred16_t vcmpltq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s16)))
+mve_pred16_t vcmpltq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s16)))
+mve_pred16_t vcmpltq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s32)))
+mve_pred16_t vcmpltq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s32)))
+mve_pred16_t vcmpltq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s8)))
+mve_pred16_t vcmpltq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s8)))
+mve_pred16_t vcmpltq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s16)))
+mve_pred16_t vcmpneq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s16)))
+mve_pred16_t vcmpneq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s32)))
+mve_pred16_t vcmpneq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s32)))
+mve_pred16_t vcmpneq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s8)))
+mve_pred16_t vcmpneq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s8)))
+mve_pred16_t vcmpneq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u16)))
+mve_pred16_t vcmpneq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u16)))
+mve_pred16_t vcmpneq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u32)))
+mve_pred16_t vcmpneq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u32)))
+mve_pred16_t vcmpneq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u8)))
+mve_pred16_t vcmpneq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u8)))
+mve_pred16_t vcmpneq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s16)))
+mve_pred16_t vcmpneq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s16)))
+mve_pred16_t vcmpneq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s32)))
+mve_pred16_t vcmpneq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s32)))
+mve_pred16_t vcmpneq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s8)))
+mve_pred16_t vcmpneq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s8)))
+mve_pred16_t vcmpneq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u16)))
+mve_pred16_t vcmpneq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u16)))
+mve_pred16_t vcmpneq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u32)))
+mve_pred16_t vcmpneq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u32)))
+mve_pred16_t vcmpneq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u8)))
+mve_pred16_t vcmpneq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u8)))
+mve_pred16_t vcmpneq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s16)))
+mve_pred16_t vcmpneq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s16)))
+mve_pred16_t vcmpneq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s32)))
+mve_pred16_t vcmpneq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s32)))
+mve_pred16_t vcmpneq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s8)))
+mve_pred16_t vcmpneq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s8)))
+mve_pred16_t vcmpneq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u16)))
+mve_pred16_t vcmpneq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u16)))
+mve_pred16_t vcmpneq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u32)))
+mve_pred16_t vcmpneq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u32)))
+mve_pred16_t vcmpneq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u8)))
+mve_pred16_t vcmpneq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u8)))
+mve_pred16_t vcmpneq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s16)))
+mve_pred16_t vcmpneq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s16)))
+mve_pred16_t vcmpneq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s32)))
+mve_pred16_t vcmpneq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s32)))
+mve_pred16_t vcmpneq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s8)))
+mve_pred16_t vcmpneq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s8)))
+mve_pred16_t vcmpneq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u16)))
+mve_pred16_t vcmpneq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u16)))
+mve_pred16_t vcmpneq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u32)))
+mve_pred16_t vcmpneq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u32)))
+mve_pred16_t vcmpneq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u8)))
+mve_pred16_t vcmpneq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u8)))
+mve_pred16_t vcmpneq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_s16)))
+int16x8_t vcreateq_s16(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_s32)))
+int32x4_t vcreateq_s32(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_s64)))
+int64x2_t vcreateq_s64(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_s8)))
+int8x16_t vcreateq_s8(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_u16)))
+uint16x8_t vcreateq_u16(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_u32)))
+uint32x4_t vcreateq_u32(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_u64)))
+uint64x2_t vcreateq_u64(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_u8)))
+uint8x16_t vcreateq_u8(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s16)))
+int16_t vgetq_lane_s16(int16x8_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s16)))
+int16_t vgetq_lane(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s32)))
+int32_t vgetq_lane_s32(int32x4_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s32)))
+int32_t vgetq_lane(int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s64)))
+int64_t vgetq_lane_s64(int64x2_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s64)))
+int64_t vgetq_lane(int64x2_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s8)))
+int8_t vgetq_lane_s8(int8x16_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s8)))
+int8_t vgetq_lane(int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u16)))
+uint16_t vgetq_lane_u16(uint16x8_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u16)))
+uint16_t vgetq_lane(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u32)))
+uint32_t vgetq_lane_u32(uint32x4_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u32)))
+uint32_t vgetq_lane(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u64)))
+uint64_t vgetq_lane_u64(uint64x2_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u64)))
+uint64_t vgetq_lane(uint64x2_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u8)))
+uint8_t vgetq_lane_u8(uint8x16_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u8)))
+uint8_t vgetq_lane(uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_s16)))
+int16x8_t vld1q_s16(const int16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_s16)))
+int16x8_t vld1q(const int16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_s32)))
+int32x4_t vld1q_s32(const int32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_s32)))
+int32x4_t vld1q(const int32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_s8)))
+int8x16_t vld1q_s8(const int8_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_s8)))
+int8x16_t vld1q(const int8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_u16)))
+uint16x8_t vld1q_u16(const uint16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_u16)))
+uint16x8_t vld1q(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_u32)))
+uint32x4_t vld1q_u32(const uint32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_u32)))
+uint32x4_t vld1q(const uint32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_u8)))
+uint8x16_t vld1q_u8(const uint8_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_u8)))
+uint8x16_t vld1q(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s16)))
+int16x8_t vld1q_z_s16(const int16_t *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s16)))
+int16x8_t vld1q_z(const int16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s32)))
+int32x4_t vld1q_z_s32(const int32_t *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s32)))
+int32x4_t vld1q_z(const int32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s8)))
+int8x16_t vld1q_z_s8(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s8)))
+int8x16_t vld1q_z(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u16)))
+uint16x8_t vld1q_z_u16(const uint16_t *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u16)))
+uint16x8_t vld1q_z(const uint16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u32)))
+uint32x4_t vld1q_z_u32(const uint32_t *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u32)))
+uint32x4_t vld1q_z(const uint32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u8)))
+uint8x16_t vld1q_z_u8(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u8)))
+uint8x16_t vld1q_z(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_s16)))
+int16x8x2_t vld2q_s16(const int16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_s16)))
+int16x8x2_t vld2q(const int16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_s32)))
+int32x4x2_t vld2q_s32(const int32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_s32)))
+int32x4x2_t vld2q(const int32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_s8)))
+int8x16x2_t vld2q_s8(const int8_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_s8)))
+int8x16x2_t vld2q(const int8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_u16)))
+uint16x8x2_t vld2q_u16(const uint16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_u16)))
+uint16x8x2_t vld2q(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_u32)))
+uint32x4x2_t vld2q_u32(const uint32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_u32)))
+uint32x4x2_t vld2q(const uint32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_u8)))
+uint8x16x2_t vld2q_u8(const uint8_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_u8)))
+uint8x16x2_t vld2q(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_s16)))
+int16x8x4_t vld4q_s16(const int16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_s16)))
+int16x8x4_t vld4q(const int16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_s32)))
+int32x4x4_t vld4q_s32(const int32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_s32)))
+int32x4x4_t vld4q(const int32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_s8)))
+int8x16x4_t vld4q_s8(const int8_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_s8)))
+int8x16x4_t vld4q(const int8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_u16)))
+uint16x8x4_t vld4q_u16(const uint16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_u16)))
+uint16x8x4_t vld4q(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_u32)))
+uint32x4x4_t vld4q_u32(const uint32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_u32)))
+uint32x4x4_t vld4q(const uint32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_u8)))
+uint8x16x4_t vld4q_u8(const uint8_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_u8)))
+uint8x16x4_t vld4q(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s16)))
+int16x8_t vldrbq_gather_offset_s16(const int8_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s16)))
+int16x8_t vldrbq_gather_offset(const int8_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s32)))
+int32x4_t vldrbq_gather_offset_s32(const int8_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s32)))
+int32x4_t vldrbq_gather_offset(const int8_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s8)))
+int8x16_t vldrbq_gather_offset_s8(const int8_t *, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s8)))
+int8x16_t vldrbq_gather_offset(const int8_t *, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u16)))
+uint16x8_t vldrbq_gather_offset_u16(const uint8_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u16)))
+uint16x8_t vldrbq_gather_offset(const uint8_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u32)))
+uint32x4_t vldrbq_gather_offset_u32(const uint8_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u32)))
+uint32x4_t vldrbq_gather_offset(const uint8_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u8)))
+uint8x16_t vldrbq_gather_offset_u8(const uint8_t *, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u8)))
+uint8x16_t vldrbq_gather_offset(const uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16)))
+int16x8_t vldrbq_gather_offset_z_s16(const int8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16)))
+int16x8_t vldrbq_gather_offset_z(const int8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32)))
+int32x4_t vldrbq_gather_offset_z_s32(const int8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32)))
+int32x4_t vldrbq_gather_offset_z(const int8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8)))
+int8x16_t vldrbq_gather_offset_z_s8(const int8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8)))
+int8x16_t vldrbq_gather_offset_z(const int8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16)))
+uint16x8_t vldrbq_gather_offset_z_u16(const uint8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16)))
+uint16x8_t vldrbq_gather_offset_z(const uint8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32)))
+uint32x4_t vldrbq_gather_offset_z_u32(const uint8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32)))
+uint32x4_t vldrbq_gather_offset_z(const uint8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8)))
+uint8x16_t vldrbq_gather_offset_z_u8(const uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8)))
+uint8x16_t vldrbq_gather_offset_z(const uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_s16)))
+int16x8_t vldrbq_s16(const int8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_s32)))
+int32x4_t vldrbq_s32(const int8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_s8)))
+int8x16_t vldrbq_s8(const int8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_u16)))
+uint16x8_t vldrbq_u16(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_u32)))
+uint32x4_t vldrbq_u32(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_u8)))
+uint8x16_t vldrbq_u8(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_s16)))
+int16x8_t vldrbq_z_s16(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_s32)))
+int32x4_t vldrbq_z_s32(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_s8)))
+int8x16_t vldrbq_z_s8(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_u16)))
+uint16x8_t vldrbq_z_u16(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_u32)))
+uint32x4_t vldrbq_z_u32(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_u8)))
+uint8x16_t vldrbq_z_u8(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_s64)))
+int64x2_t vldrdq_gather_base_s64(uint64x2_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_u64)))
+uint64x2_t vldrdq_gather_base_u64(uint64x2_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_wb_s64)))
+int64x2_t vldrdq_gather_base_wb_s64(uint64x2_t *, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_wb_u64)))
+uint64x2_t vldrdq_gather_base_wb_u64(uint64x2_t *, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_s64)))
+int64x2_t vldrdq_gather_base_wb_z_s64(uint64x2_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_u64)))
+uint64x2_t vldrdq_gather_base_wb_z_u64(uint64x2_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_z_s64)))
+int64x2_t vldrdq_gather_base_z_s64(uint64x2_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_z_u64)))
+uint64x2_t vldrdq_gather_base_z_u64(uint64x2_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_s64)))
+int64x2_t vldrdq_gather_offset_s64(const int64_t *, uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_s64)))
+int64x2_t vldrdq_gather_offset(const int64_t *, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_u64)))
+uint64x2_t vldrdq_gather_offset_u64(const uint64_t *, uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_u64)))
+uint64x2_t vldrdq_gather_offset(const uint64_t *, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64)))
+int64x2_t vldrdq_gather_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64)))
+int64x2_t vldrdq_gather_offset_z(const int64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64)))
+uint64x2_t vldrdq_gather_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64)))
+uint64x2_t vldrdq_gather_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64)))
+int64x2_t vldrdq_gather_shifted_offset_s64(const int64_t *, uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64)))
+int64x2_t vldrdq_gather_shifted_offset(const int64_t *, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64)))
+uint64x2_t vldrdq_gather_shifted_offset_u64(const uint64_t *, uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64)))
+uint64x2_t vldrdq_gather_shifted_offset(const uint64_t *, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64)))
+int64x2_t vldrdq_gather_shifted_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64)))
+int64x2_t vldrdq_gather_shifted_offset_z(const int64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64)))
+uint64x2_t vldrdq_gather_shifted_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64)))
+uint64x2_t vldrdq_gather_shifted_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_s16)))
+int16x8_t vldrhq_gather_offset_s16(const int16_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_s16)))
+int16x8_t vldrhq_gather_offset(const int16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_s32)))
+int32x4_t vldrhq_gather_offset_s32(const int16_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_s32)))
+int32x4_t vldrhq_gather_offset(const int16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_u16)))
+uint16x8_t vldrhq_gather_offset_u16(const uint16_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_u16)))
+uint16x8_t vldrhq_gather_offset(const uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_u32)))
+uint32x4_t vldrhq_gather_offset_u32(const uint16_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_u32)))
+uint32x4_t vldrhq_gather_offset(const uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16)))
+int16x8_t vldrhq_gather_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16)))
+int16x8_t vldrhq_gather_offset_z(const int16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32)))
+int32x4_t vldrhq_gather_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32)))
+int32x4_t vldrhq_gather_offset_z(const int16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16)))
+uint16x8_t vldrhq_gather_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16)))
+uint16x8_t vldrhq_gather_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32)))
+uint32x4_t vldrhq_gather_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32)))
+uint32x4_t vldrhq_gather_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16)))
+int16x8_t vldrhq_gather_shifted_offset_s16(const int16_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16)))
+int16x8_t vldrhq_gather_shifted_offset(const int16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32)))
+int32x4_t vldrhq_gather_shifted_offset_s32(const int16_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32)))
+int32x4_t vldrhq_gather_shifted_offset(const int16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16)))
+uint16x8_t vldrhq_gather_shifted_offset_u16(const uint16_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16)))
+uint16x8_t vldrhq_gather_shifted_offset(const uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32)))
+uint32x4_t vldrhq_gather_shifted_offset_u32(const uint16_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32)))
+uint32x4_t vldrhq_gather_shifted_offset(const uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16)))
+int16x8_t vldrhq_gather_shifted_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16)))
+int16x8_t vldrhq_gather_shifted_offset_z(const int16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32)))
+int32x4_t vldrhq_gather_shifted_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32)))
+int32x4_t vldrhq_gather_shifted_offset_z(const int16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16)))
+uint16x8_t vldrhq_gather_shifted_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16)))
+uint16x8_t vldrhq_gather_shifted_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32)))
+uint32x4_t vldrhq_gather_shifted_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32)))
+uint32x4_t vldrhq_gather_shifted_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_s16)))
+int16x8_t vldrhq_s16(const int16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_s32)))
+int32x4_t vldrhq_s32(const int16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_u16)))
+uint16x8_t vldrhq_u16(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_u32)))
+uint32x4_t vldrhq_u32(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_s16)))
+int16x8_t vldrhq_z_s16(const int16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_s32)))
+int32x4_t vldrhq_z_s32(const int16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_u16)))
+uint16x8_t vldrhq_z_u16(const uint16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_u32)))
+uint32x4_t vldrhq_z_u32(const uint16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_s32)))
+int32x4_t vldrwq_gather_base_s32(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_u32)))
+uint32x4_t vldrwq_gather_base_u32(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_s32)))
+int32x4_t vldrwq_gather_base_wb_s32(uint32x4_t *, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_u32)))
+uint32x4_t vldrwq_gather_base_wb_u32(uint32x4_t *, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_s32)))
+int32x4_t vldrwq_gather_base_wb_z_s32(uint32x4_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_u32)))
+uint32x4_t vldrwq_gather_base_wb_z_u32(uint32x4_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_z_s32)))
+int32x4_t vldrwq_gather_base_z_s32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_z_u32)))
+uint32x4_t vldrwq_gather_base_z_u32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_s32)))
+int32x4_t vldrwq_gather_offset_s32(const int32_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_s32)))
+int32x4_t vldrwq_gather_offset(const int32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_u32)))
+uint32x4_t vldrwq_gather_offset_u32(const uint32_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_u32)))
+uint32x4_t vldrwq_gather_offset(const uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32)))
+int32x4_t vldrwq_gather_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32)))
+int32x4_t vldrwq_gather_offset_z(const int32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32)))
+uint32x4_t vldrwq_gather_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32)))
+uint32x4_t vldrwq_gather_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32)))
+int32x4_t vldrwq_gather_shifted_offset_s32(const int32_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32)))
+int32x4_t vldrwq_gather_shifted_offset(const int32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32)))
+uint32x4_t vldrwq_gather_shifted_offset_u32(const uint32_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32)))
+uint32x4_t vldrwq_gather_shifted_offset(const uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32)))
+int32x4_t vldrwq_gather_shifted_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32)))
+int32x4_t vldrwq_gather_shifted_offset_z(const int32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32)))
+uint32x4_t vldrwq_gather_shifted_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32)))
+uint32x4_t vldrwq_gather_shifted_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_s32)))
+int32x4_t vldrwq_s32(const int32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_u32)))
+uint32x4_t vldrwq_u32(const uint32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_z_s32)))
+int32x4_t vldrwq_z_s32(const int32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_z_u32)))
+uint32x4_t vldrwq_z_u32(const uint32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s16)))
+int16_t vmaxvq_s16(int16_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s16)))
+int16_t vmaxvq(int16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s32)))
+int32_t vmaxvq_s32(int32_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s32)))
+int32_t vmaxvq(int32_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s8)))
+int8_t vmaxvq_s8(int8_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s8)))
+int8_t vmaxvq(int8_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u16)))
+uint16_t vmaxvq_u16(uint16_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u16)))
+uint16_t vmaxvq(uint16_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u32)))
+uint32_t vmaxvq_u32(uint32_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u32)))
+uint32_t vmaxvq(uint32_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u8)))
+uint8_t vmaxvq_u8(uint8_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u8)))
+uint8_t vmaxvq(uint8_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_s16)))
+int16_t vminvq_s16(int16_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_s16)))
+int16_t vminvq(int16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_s32)))
+int32_t vminvq_s32(int32_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_s32)))
+int32_t vminvq(int32_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_s8)))
+int8_t vminvq_s8(int8_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_s8)))
+int8_t vminvq(int8_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_u16)))
+uint16_t vminvq_u16(uint16_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_u16)))
+uint16_t vminvq(uint16_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_u32)))
+uint32_t vminvq_u32(uint32_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_u32)))
+uint32_t vminvq(uint32_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_u8)))
+uint8_t vminvq_u8(uint8_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_u8)))
+uint8_t vminvq(uint8_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s32)))
+int16x8_t vreinterpretq_s16_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s32)))
+int16x8_t vreinterpretq_s16(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s64)))
+int16x8_t vreinterpretq_s16_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s64)))
+int16x8_t vreinterpretq_s16(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s8)))
+int16x8_t vreinterpretq_s16_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s8)))
+int16x8_t vreinterpretq_s16(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u16)))
+int16x8_t vreinterpretq_s16_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u16)))
+int16x8_t vreinterpretq_s16(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u32)))
+int16x8_t vreinterpretq_s16_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u32)))
+int16x8_t vreinterpretq_s16(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u64)))
+int16x8_t vreinterpretq_s16_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u64)))
+int16x8_t vreinterpretq_s16(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u8)))
+int16x8_t vreinterpretq_s16_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u8)))
+int16x8_t vreinterpretq_s16(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s16)))
+int32x4_t vreinterpretq_s32_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s16)))
+int32x4_t vreinterpretq_s32(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s64)))
+int32x4_t vreinterpretq_s32_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s64)))
+int32x4_t vreinterpretq_s32(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s8)))
+int32x4_t vreinterpretq_s32_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s8)))
+int32x4_t vreinterpretq_s32(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u16)))
+int32x4_t vreinterpretq_s32_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u16)))
+int32x4_t vreinterpretq_s32(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u32)))
+int32x4_t vreinterpretq_s32_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u32)))
+int32x4_t vreinterpretq_s32(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u64)))
+int32x4_t vreinterpretq_s32_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u64)))
+int32x4_t vreinterpretq_s32(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u8)))
+int32x4_t vreinterpretq_s32_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u8)))
+int32x4_t vreinterpretq_s32(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s16)))
+int64x2_t vreinterpretq_s64_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s16)))
+int64x2_t vreinterpretq_s64(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s32)))
+int64x2_t vreinterpretq_s64_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s32)))
+int64x2_t vreinterpretq_s64(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s8)))
+int64x2_t vreinterpretq_s64_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s8)))
+int64x2_t vreinterpretq_s64(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u16)))
+int64x2_t vreinterpretq_s64_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u16)))
+int64x2_t vreinterpretq_s64(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u32)))
+int64x2_t vreinterpretq_s64_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u32)))
+int64x2_t vreinterpretq_s64(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u64)))
+int64x2_t vreinterpretq_s64_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u64)))
+int64x2_t vreinterpretq_s64(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u8)))
+int64x2_t vreinterpretq_s64_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u8)))
+int64x2_t vreinterpretq_s64(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s16)))
+int8x16_t vreinterpretq_s8_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s16)))
+int8x16_t vreinterpretq_s8(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s32)))
+int8x16_t vreinterpretq_s8_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s32)))
+int8x16_t vreinterpretq_s8(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s64)))
+int8x16_t vreinterpretq_s8_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s64)))
+int8x16_t vreinterpretq_s8(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u16)))
+int8x16_t vreinterpretq_s8_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u16)))
+int8x16_t vreinterpretq_s8(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u32)))
+int8x16_t vreinterpretq_s8_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u32)))
+int8x16_t vreinterpretq_s8(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u64)))
+int8x16_t vreinterpretq_s8_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u64)))
+int8x16_t vreinterpretq_s8(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u8)))
+int8x16_t vreinterpretq_s8_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u8)))
+int8x16_t vreinterpretq_s8(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s16)))
+uint16x8_t vreinterpretq_u16_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s16)))
+uint16x8_t vreinterpretq_u16(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s32)))
+uint16x8_t vreinterpretq_u16_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s32)))
+uint16x8_t vreinterpretq_u16(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s64)))
+uint16x8_t vreinterpretq_u16_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s64)))
+uint16x8_t vreinterpretq_u16(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s8)))
+uint16x8_t vreinterpretq_u16_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s8)))
+uint16x8_t vreinterpretq_u16(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u32)))
+uint16x8_t vreinterpretq_u16_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u32)))
+uint16x8_t vreinterpretq_u16(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u64)))
+uint16x8_t vreinterpretq_u16_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u64)))
+uint16x8_t vreinterpretq_u16(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u8)))
+uint16x8_t vreinterpretq_u16_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u8)))
+uint16x8_t vreinterpretq_u16(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s16)))
+uint32x4_t vreinterpretq_u32_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s16)))
+uint32x4_t vreinterpretq_u32(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s32)))
+uint32x4_t vreinterpretq_u32_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s32)))
+uint32x4_t vreinterpretq_u32(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s64)))
+uint32x4_t vreinterpretq_u32_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s64)))
+uint32x4_t vreinterpretq_u32(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s8)))
+uint32x4_t vreinterpretq_u32_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s8)))
+uint32x4_t vreinterpretq_u32(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u16)))
+uint32x4_t vreinterpretq_u32_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u16)))
+uint32x4_t vreinterpretq_u32(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u64)))
+uint32x4_t vreinterpretq_u32_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u64)))
+uint32x4_t vreinterpretq_u32(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u8)))
+uint32x4_t vreinterpretq_u32_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u8)))
+uint32x4_t vreinterpretq_u32(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s16)))
+uint64x2_t vreinterpretq_u64_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s16)))
+uint64x2_t vreinterpretq_u64(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s32)))
+uint64x2_t vreinterpretq_u64_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s32)))
+uint64x2_t vreinterpretq_u64(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s64)))
+uint64x2_t vreinterpretq_u64_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s64)))
+uint64x2_t vreinterpretq_u64(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s8)))
+uint64x2_t vreinterpretq_u64_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s8)))
+uint64x2_t vreinterpretq_u64(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u16)))
+uint64x2_t vreinterpretq_u64_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u16)))
+uint64x2_t vreinterpretq_u64(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u32)))
+uint64x2_t vreinterpretq_u64_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u32)))
+uint64x2_t vreinterpretq_u64(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u8)))
+uint64x2_t vreinterpretq_u64_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u8)))
+uint64x2_t vreinterpretq_u64(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s16)))
+uint8x16_t vreinterpretq_u8_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s16)))
+uint8x16_t vreinterpretq_u8(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s32)))
+uint8x16_t vreinterpretq_u8_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s32)))
+uint8x16_t vreinterpretq_u8(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s64)))
+uint8x16_t vreinterpretq_u8_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s64)))
+uint8x16_t vreinterpretq_u8(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s8)))
+uint8x16_t vreinterpretq_u8_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s8)))
+uint8x16_t vreinterpretq_u8(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u16)))
+uint8x16_t vreinterpretq_u8_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u16)))
+uint8x16_t vreinterpretq_u8(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u32)))
+uint8x16_t vreinterpretq_u8_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u32)))
+uint8x16_t vreinterpretq_u8(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u64)))
+uint8x16_t vreinterpretq_u8_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u64)))
+uint8x16_t vreinterpretq_u8(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s16)))
+int16x8_t vsetq_lane_s16(int16_t, int16x8_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s16)))
+int16x8_t vsetq_lane(int16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s32)))
+int32x4_t vsetq_lane_s32(int32_t, int32x4_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s32)))
+int32x4_t vsetq_lane(int32_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s64)))
+int64x2_t vsetq_lane_s64(int64_t, int64x2_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s64)))
+int64x2_t vsetq_lane(int64_t, int64x2_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s8)))
+int8x16_t vsetq_lane_s8(int8_t, int8x16_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s8)))
+int8x16_t vsetq_lane(int8_t, int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u16)))
+uint16x8_t vsetq_lane_u16(uint16_t, uint16x8_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u16)))
+uint16x8_t vsetq_lane(uint16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u32)))
+uint32x4_t vsetq_lane_u32(uint32_t, uint32x4_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u32)))
+uint32x4_t vsetq_lane(uint32_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u64)))
+uint64x2_t vsetq_lane_u64(uint64_t, uint64x2_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u64)))
+uint64x2_t vsetq_lane(uint64_t, uint64x2_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u8)))
+uint8x16_t vsetq_lane_u8(uint8_t, uint8x16_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u8)))
+uint8x16_t vsetq_lane(uint8_t, uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s16)))
+void vst1q_p_s16(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s16)))
+void vst1q_p(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s32)))
+void vst1q_p_s32(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s32)))
+void vst1q_p(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s8)))
+void vst1q_p_s8(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s8)))
+void vst1q_p(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u16)))
+void vst1q_p_u16(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u16)))
+void vst1q_p(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u32)))
+void vst1q_p_u32(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u32)))
+void vst1q_p(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u8)))
+void vst1q_p_u8(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u8)))
+void vst1q_p(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_s16)))
+void vst1q_s16(int16_t *, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_s16)))
+void vst1q(int16_t *, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_s32)))
+void vst1q_s32(int32_t *, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_s32)))
+void vst1q(int32_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_s8)))
+void vst1q_s8(int8_t *, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_s8)))
+void vst1q(int8_t *, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_u16)))
+void vst1q_u16(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_u16)))
+void vst1q(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_u32)))
+void vst1q_u32(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_u32)))
+void vst1q(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_u8)))
+void vst1q_u8(uint8_t *, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_u8)))
+void vst1q(uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_s16)))
+void vst2q_s16(int16_t *, int16x8x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_s16)))
+void vst2q(int16_t *, int16x8x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_s32)))
+void vst2q_s32(int32_t *, int32x4x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_s32)))
+void vst2q(int32_t *, int32x4x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_s8)))
+void vst2q_s8(int8_t *, int8x16x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_s8)))
+void vst2q(int8_t *, int8x16x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_u16)))
+void vst2q_u16(uint16_t *, uint16x8x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_u16)))
+void vst2q(uint16_t *, uint16x8x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_u32)))
+void vst2q_u32(uint32_t *, uint32x4x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_u32)))
+void vst2q(uint32_t *, uint32x4x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_u8)))
+void vst2q_u8(uint8_t *, uint8x16x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_u8)))
+void vst2q(uint8_t *, uint8x16x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_s16)))
+void vst4q_s16(int16_t *, int16x8x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_s16)))
+void vst4q(int16_t *, int16x8x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_s32)))
+void vst4q_s32(int32_t *, int32x4x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_s32)))
+void vst4q(int32_t *, int32x4x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_s8)))
+void vst4q_s8(int8_t *, int8x16x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_s8)))
+void vst4q(int8_t *, int8x16x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_u16)))
+void vst4q_u16(uint16_t *, uint16x8x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_u16)))
+void vst4q(uint16_t *, uint16x8x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_u32)))
+void vst4q_u32(uint32_t *, uint32x4x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_u32)))
+void vst4q(uint32_t *, uint32x4x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_u8)))
+void vst4q_u8(uint8_t *, uint8x16x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_u8)))
+void vst4q(uint8_t *, uint8x16x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s16)))
+void vstrbq_p_s16(int8_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s16)))
+void vstrbq_p(int8_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s32)))
+void vstrbq_p_s32(int8_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s32)))
+void vstrbq_p(int8_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s8)))
+void vstrbq_p_s8(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s8)))
+void vstrbq_p(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u16)))
+void vstrbq_p_u16(uint8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u16)))
+void vstrbq_p(uint8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u32)))
+void vstrbq_p_u32(uint8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u32)))
+void vstrbq_p(uint8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u8)))
+void vstrbq_p_u8(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u8)))
+void vstrbq_p(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s16)))
+void vstrbq_s16(int8_t *, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s16)))
+void vstrbq(int8_t *, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s32)))
+void vstrbq_s32(int8_t *, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s32)))
+void vstrbq(int8_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s8)))
+void vstrbq_s8(int8_t *, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s8)))
+void vstrbq(int8_t *, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16)))
+void vstrbq_scatter_offset_p_s16(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16)))
+void vstrbq_scatter_offset_p(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32)))
+void vstrbq_scatter_offset_p_s32(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32)))
+void vstrbq_scatter_offset_p(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8)))
+void vstrbq_scatter_offset_p_s8(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8)))
+void vstrbq_scatter_offset_p(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16)))
+void vstrbq_scatter_offset_p_u16(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16)))
+void vstrbq_scatter_offset_p(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32)))
+void vstrbq_scatter_offset_p_u32(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32)))
+void vstrbq_scatter_offset_p(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8)))
+void vstrbq_scatter_offset_p_u8(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8)))
+void vstrbq_scatter_offset_p(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16)))
+void vstrbq_scatter_offset_s16(int8_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16)))
+void vstrbq_scatter_offset(int8_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32)))
+void vstrbq_scatter_offset_s32(int8_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32)))
+void vstrbq_scatter_offset(int8_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8)))
+void vstrbq_scatter_offset_s8(int8_t *, uint8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8)))
+void vstrbq_scatter_offset(int8_t *, uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16)))
+void vstrbq_scatter_offset_u16(uint8_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16)))
+void vstrbq_scatter_offset(uint8_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32)))
+void vstrbq_scatter_offset_u32(uint8_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32)))
+void vstrbq_scatter_offset(uint8_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8)))
+void vstrbq_scatter_offset_u8(uint8_t *, uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8)))
+void vstrbq_scatter_offset(uint8_t *, uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u16)))
+void vstrbq_u16(uint8_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u16)))
+void vstrbq(uint8_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u32)))
+void vstrbq_u32(uint8_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u32)))
+void vstrbq(uint8_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u8)))
+void vstrbq_u8(uint8_t *, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u8)))
+void vstrbq(uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64)))
+void vstrdq_scatter_base_p_s64(uint64x2_t, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64)))
+void vstrdq_scatter_base_p(uint64x2_t, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64)))
+void vstrdq_scatter_base_p_u64(uint64x2_t, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64)))
+void vstrdq_scatter_base_p(uint64x2_t, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_s64)))
+void vstrdq_scatter_base_s64(uint64x2_t, int, int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_s64)))
+void vstrdq_scatter_base(uint64x2_t, int, int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_u64)))
+void vstrdq_scatter_base_u64(uint64x2_t, int, uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_u64)))
+void vstrdq_scatter_base(uint64x2_t, int, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64)))
+void vstrdq_scatter_base_wb_p_s64(uint64x2_t *, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64)))
+void vstrdq_scatter_base_wb_p(uint64x2_t *, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64)))
+void vstrdq_scatter_base_wb_p_u64(uint64x2_t *, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64)))
+void vstrdq_scatter_base_wb_p(uint64x2_t *, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64)))
+void vstrdq_scatter_base_wb_s64(uint64x2_t *, int, int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64)))
+void vstrdq_scatter_base_wb(uint64x2_t *, int, int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64)))
+void vstrdq_scatter_base_wb_u64(uint64x2_t *, int, uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64)))
+void vstrdq_scatter_base_wb(uint64x2_t *, int, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64)))
+void vstrdq_scatter_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64)))
+void vstrdq_scatter_offset_p(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64)))
+void vstrdq_scatter_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64)))
+void vstrdq_scatter_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64)))
+void vstrdq_scatter_offset_s64(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64)))
+void vstrdq_scatter_offset(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64)))
+void vstrdq_scatter_offset_u64(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64)))
+void vstrdq_scatter_offset(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64)))
+void vstrdq_scatter_shifted_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64)))
+void vstrdq_scatter_shifted_offset_p(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64)))
+void vstrdq_scatter_shifted_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64)))
+void vstrdq_scatter_shifted_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64)))
+void vstrdq_scatter_shifted_offset_s64(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64)))
+void vstrdq_scatter_shifted_offset(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64)))
+void vstrdq_scatter_shifted_offset_u64(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64)))
+void vstrdq_scatter_shifted_offset(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_s16)))
+void vstrhq_p_s16(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_s16)))
+void vstrhq_p(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_s32)))
+void vstrhq_p_s32(int16_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_s32)))
+void vstrhq_p(int16_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_u16)))
+void vstrhq_p_u16(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_u16)))
+void vstrhq_p(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_u32)))
+void vstrhq_p_u32(uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_u32)))
+void vstrhq_p(uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_s16)))
+void vstrhq_s16(int16_t *, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_s16)))
+void vstrhq(int16_t *, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_s32)))
+void vstrhq_s32(int16_t *, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_s32)))
+void vstrhq(int16_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16)))
+void vstrhq_scatter_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16)))
+void vstrhq_scatter_offset_p(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32)))
+void vstrhq_scatter_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32)))
+void vstrhq_scatter_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16)))
+void vstrhq_scatter_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16)))
+void vstrhq_scatter_offset_p(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32)))
+void vstrhq_scatter_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32)))
+void vstrhq_scatter_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16)))
+void vstrhq_scatter_offset_s16(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16)))
+void vstrhq_scatter_offset(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32)))
+void vstrhq_scatter_offset_s32(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32)))
+void vstrhq_scatter_offset(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16)))
+void vstrhq_scatter_offset_u16(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16)))
+void vstrhq_scatter_offset(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32)))
+void vstrhq_scatter_offset_u32(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32)))
+void vstrhq_scatter_offset(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16)))
+void vstrhq_scatter_shifted_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16)))
+void vstrhq_scatter_shifted_offset_p(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32)))
+void vstrhq_scatter_shifted_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32)))
+void vstrhq_scatter_shifted_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16)))
+void vstrhq_scatter_shifted_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16)))
+void vstrhq_scatter_shifted_offset_p(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32)))
+void vstrhq_scatter_shifted_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32)))
+void vstrhq_scatter_shifted_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16)))
+void vstrhq_scatter_shifted_offset_s16(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16)))
+void vstrhq_scatter_shifted_offset(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32)))
+void vstrhq_scatter_shifted_offset_s32(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32)))
+void vstrhq_scatter_shifted_offset(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16)))
+void vstrhq_scatter_shifted_offset_u16(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16)))
+void vstrhq_scatter_shifted_offset(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32)))
+void vstrhq_scatter_shifted_offset_u32(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32)))
+void vstrhq_scatter_shifted_offset(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_u16)))
+void vstrhq_u16(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_u16)))
+void vstrhq(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_u32)))
+void vstrhq_u32(uint16_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_u32)))
+void vstrhq(uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_s32)))
+void vstrwq_p_s32(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_s32)))
+void vstrwq_p(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_u32)))
+void vstrwq_p_u32(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_u32)))
+void vstrwq_p(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_s32)))
+void vstrwq_s32(int32_t *, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_s32)))
+void vstrwq(int32_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32)))
+void vstrwq_scatter_base_p_s32(uint32x4_t, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32)))
+void vstrwq_scatter_base_p(uint32x4_t, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32)))
+void vstrwq_scatter_base_p_u32(uint32x4_t, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32)))
+void vstrwq_scatter_base_p(uint32x4_t, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_s32)))
+void vstrwq_scatter_base_s32(uint32x4_t, int, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_s32)))
+void vstrwq_scatter_base(uint32x4_t, int, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_u32)))
+void vstrwq_scatter_base_u32(uint32x4_t, int, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_u32)))
+void vstrwq_scatter_base(uint32x4_t, int, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32)))
+void vstrwq_scatter_base_wb_p_s32(uint32x4_t *, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32)))
+void vstrwq_scatter_base_wb_p(uint32x4_t *, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32)))
+void vstrwq_scatter_base_wb_p_u32(uint32x4_t *, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32)))
+void vstrwq_scatter_base_wb_p(uint32x4_t *, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32)))
+void vstrwq_scatter_base_wb_s32(uint32x4_t *, int, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32)))
+void vstrwq_scatter_base_wb(uint32x4_t *, int, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32)))
+void vstrwq_scatter_base_wb_u32(uint32x4_t *, int, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32)))
+void vstrwq_scatter_base_wb(uint32x4_t *, int, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32)))
+void vstrwq_scatter_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32)))
+void vstrwq_scatter_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32)))
+void vstrwq_scatter_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32)))
+void vstrwq_scatter_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32)))
+void vstrwq_scatter_offset_s32(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32)))
+void vstrwq_scatter_offset(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32)))
+void vstrwq_scatter_offset_u32(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32)))
+void vstrwq_scatter_offset(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32)))
+void vstrwq_scatter_shifted_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32)))
+void vstrwq_scatter_shifted_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32)))
+void vstrwq_scatter_shifted_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32)))
+void vstrwq_scatter_shifted_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32)))
+void vstrwq_scatter_shifted_offset_s32(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32)))
+void vstrwq_scatter_shifted_offset(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32)))
+void vstrwq_scatter_shifted_offset_u32(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32)))
+void vstrwq_scatter_shifted_offset(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_u32)))
+void vstrwq_u32(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_u32)))
+void vstrwq(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s16)))
+int16x8_t vsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s16)))
+int16x8_t vsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s32)))
+int32x4_t vsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s32)))
+int32x4_t vsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s8)))
+int8x16_t vsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s8)))
+int8x16_t vsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u16)))
+uint16x8_t vsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u16)))
+uint16x8_t vsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u32)))
+uint32x4_t vsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u32)))
+uint32x4_t vsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u8)))
+uint8x16_t vsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u8)))
+uint8x16_t vsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_s16)))
+int16x8_t vsubq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_s16)))
+int16x8_t vsubq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_s32)))
+int32x4_t vsubq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_s32)))
+int32x4_t vsubq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_s8)))
+int8x16_t vsubq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_s8)))
+int8x16_t vsubq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_u16)))
+uint16x8_t vsubq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_u16)))
+uint16x8_t vsubq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_u32)))
+uint32x4_t vsubq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_u32)))
+uint32x4_t vsubq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_u8)))
+uint8x16_t vsubq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_u8)))
+uint8x16_t vsubq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s16)))
+int16x8_t vuninitializedq(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s32)))
+int32x4_t vuninitializedq(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s64)))
+int64x2_t vuninitializedq(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s8)))
+int8x16_t vuninitializedq(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u16)))
+uint16x8_t vuninitializedq(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u32)))
+uint32x4_t vuninitializedq(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u64)))
+uint64x2_t vuninitializedq(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u8)))
+uint8x16_t vuninitializedq(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_s16)))
+int16x8_t vuninitializedq_s16();
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_s32)))
+int32x4_t vuninitializedq_s32();
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_s64)))
+int64x2_t vuninitializedq_s64();
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_s8)))
+int8x16_t vuninitializedq_s8();
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_u16)))
+uint16x8_t vuninitializedq_u16();
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_u32)))
+uint32x4_t vuninitializedq_u32();
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_u64)))
+uint64x2_t vuninitializedq_u64();
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_u8)))
+uint8x16_t vuninitializedq_u8();
+
+#endif /* (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE) */
+
+#if (__ARM_FEATURE_MVE & 2) && (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE)
+
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_f16)))
+float16x8_t vaddq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_f16)))
+float16x8_t vaddq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_f32)))
+float32x4_t vaddq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_f32)))
+float32x4_t vaddq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_f16)))
+float16x8_t vaddq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_f16)))
+float16x8_t vaddq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_f32)))
+float32x4_t vaddq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_f32)))
+float32x4_t vaddq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_f16)))
+mve_pred16_t vcmpeqq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_f16)))
+mve_pred16_t vcmpeqq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_f32)))
+mve_pred16_t vcmpeqq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_f32)))
+mve_pred16_t vcmpeqq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_f16)))
+mve_pred16_t vcmpeqq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_f16)))
+mve_pred16_t vcmpeqq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_f32)))
+mve_pred16_t vcmpeqq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_f32)))
+mve_pred16_t vcmpeqq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_f16)))
+mve_pred16_t vcmpeqq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_f16)))
+mve_pred16_t vcmpeqq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_f32)))
+mve_pred16_t vcmpeqq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_f32)))
+mve_pred16_t vcmpeqq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_f16)))
+mve_pred16_t vcmpeqq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_f16)))
+mve_pred16_t vcmpeqq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_f32)))
+mve_pred16_t vcmpeqq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_f32)))
+mve_pred16_t vcmpeqq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_f16)))
+mve_pred16_t vcmpgeq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_f16)))
+mve_pred16_t vcmpgeq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_f32)))
+mve_pred16_t vcmpgeq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_f32)))
+mve_pred16_t vcmpgeq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_f16)))
+mve_pred16_t vcmpgeq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_f16)))
+mve_pred16_t vcmpgeq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_f32)))
+mve_pred16_t vcmpgeq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_f32)))
+mve_pred16_t vcmpgeq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_f16)))
+mve_pred16_t vcmpgeq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_f16)))
+mve_pred16_t vcmpgeq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_f32)))
+mve_pred16_t vcmpgeq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_f32)))
+mve_pred16_t vcmpgeq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_f16)))
+mve_pred16_t vcmpgeq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_f16)))
+mve_pred16_t vcmpgeq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_f32)))
+mve_pred16_t vcmpgeq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_f32)))
+mve_pred16_t vcmpgeq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_f16)))
+mve_pred16_t vcmpgtq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_f16)))
+mve_pred16_t vcmpgtq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_f32)))
+mve_pred16_t vcmpgtq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_f32)))
+mve_pred16_t vcmpgtq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_f16)))
+mve_pred16_t vcmpgtq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_f16)))
+mve_pred16_t vcmpgtq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_f32)))
+mve_pred16_t vcmpgtq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_f32)))
+mve_pred16_t vcmpgtq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_f16)))
+mve_pred16_t vcmpgtq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_f16)))
+mve_pred16_t vcmpgtq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_f32)))
+mve_pred16_t vcmpgtq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_f32)))
+mve_pred16_t vcmpgtq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_f16)))
+mve_pred16_t vcmpgtq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_f16)))
+mve_pred16_t vcmpgtq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_f32)))
+mve_pred16_t vcmpgtq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_f32)))
+mve_pred16_t vcmpgtq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_f16)))
+mve_pred16_t vcmpleq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_f16)))
+mve_pred16_t vcmpleq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_f32)))
+mve_pred16_t vcmpleq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_f32)))
+mve_pred16_t vcmpleq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_f16)))
+mve_pred16_t vcmpleq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_f16)))
+mve_pred16_t vcmpleq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_f32)))
+mve_pred16_t vcmpleq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_f32)))
+mve_pred16_t vcmpleq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_f16)))
+mve_pred16_t vcmpleq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_f16)))
+mve_pred16_t vcmpleq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_f32)))
+mve_pred16_t vcmpleq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_f32)))
+mve_pred16_t vcmpleq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_f16)))
+mve_pred16_t vcmpleq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_f16)))
+mve_pred16_t vcmpleq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_f32)))
+mve_pred16_t vcmpleq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_f32)))
+mve_pred16_t vcmpleq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_f16)))
+mve_pred16_t vcmpltq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_f16)))
+mve_pred16_t vcmpltq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_f32)))
+mve_pred16_t vcmpltq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_f32)))
+mve_pred16_t vcmpltq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_f16)))
+mve_pred16_t vcmpltq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_f16)))
+mve_pred16_t vcmpltq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_f32)))
+mve_pred16_t vcmpltq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_f32)))
+mve_pred16_t vcmpltq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_f16)))
+mve_pred16_t vcmpltq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_f16)))
+mve_pred16_t vcmpltq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_f32)))
+mve_pred16_t vcmpltq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_f32)))
+mve_pred16_t vcmpltq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_f16)))
+mve_pred16_t vcmpltq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_f16)))
+mve_pred16_t vcmpltq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_f32)))
+mve_pred16_t vcmpltq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_f32)))
+mve_pred16_t vcmpltq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_f16)))
+mve_pred16_t vcmpneq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_f16)))
+mve_pred16_t vcmpneq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_f32)))
+mve_pred16_t vcmpneq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_f32)))
+mve_pred16_t vcmpneq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_f16)))
+mve_pred16_t vcmpneq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_f16)))
+mve_pred16_t vcmpneq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_f32)))
+mve_pred16_t vcmpneq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_f32)))
+mve_pred16_t vcmpneq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_f16)))
+mve_pred16_t vcmpneq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_f16)))
+mve_pred16_t vcmpneq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_f32)))
+mve_pred16_t vcmpneq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_f32)))
+mve_pred16_t vcmpneq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_f16)))
+mve_pred16_t vcmpneq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_f16)))
+mve_pred16_t vcmpneq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_f32)))
+mve_pred16_t vcmpneq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_f32)))
+mve_pred16_t vcmpneq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_f16)))
+float16x8_t vcreateq_f16(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_f32)))
+float32x4_t vcreateq_f32(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtbq_f16_f32)))
+float16x8_t vcvtbq_f16_f32(float16x8_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtbq_m_f16_f32)))
+float16x8_t vcvtbq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvttq_f16_f32)))
+float16x8_t vcvttq_f16_f32(float16x8_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvttq_m_f16_f32)))
+float16x8_t vcvttq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_f16)))
+float16_t vgetq_lane_f16(float16x8_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_f16)))
+float16_t vgetq_lane(float16x8_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_f32)))
+float32_t vgetq_lane_f32(float32x4_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_f32)))
+float32_t vgetq_lane(float32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_f16)))
+float16x8_t vld1q_f16(const float16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_f16)))
+float16x8_t vld1q(const float16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_f32)))
+float32x4_t vld1q_f32(const float32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_f32)))
+float32x4_t vld1q(const float32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_f16)))
+float16x8_t vld1q_z_f16(const float16_t *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_f16)))
+float16x8_t vld1q_z(const float16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_f32)))
+float32x4_t vld1q_z_f32(const float32_t *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_f32)))
+float32x4_t vld1q_z(const float32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_f16)))
+float16x8x2_t vld2q_f16(const float16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_f16)))
+float16x8x2_t vld2q(const float16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_f32)))
+float32x4x2_t vld2q_f32(const float32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_f32)))
+float32x4x2_t vld2q(const float32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_f16)))
+float16x8x4_t vld4q_f16(const float16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_f16)))
+float16x8x4_t vld4q(const float16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_f32)))
+float32x4x4_t vld4q_f32(const float32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_f32)))
+float32x4x4_t vld4q(const float32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_f16)))
+float16x8_t vldrhq_f16(const float16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_f16)))
+float16x8_t vldrhq_gather_offset_f16(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_f16)))
+float16x8_t vldrhq_gather_offset(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16)))
+float16x8_t vldrhq_gather_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16)))
+float16x8_t vldrhq_gather_offset_z(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16)))
+float16x8_t vldrhq_gather_shifted_offset_f16(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16)))
+float16x8_t vldrhq_gather_shifted_offset(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16)))
+float16x8_t vldrhq_gather_shifted_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16)))
+float16x8_t vldrhq_gather_shifted_offset_z(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_f16)))
+float16x8_t vldrhq_z_f16(const float16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_f32)))
+float32x4_t vldrwq_f32(const float32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_f32)))
+float32x4_t vldrwq_gather_base_f32(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_f32)))
+float32x4_t vldrwq_gather_base_wb_f32(uint32x4_t *, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_f32)))
+float32x4_t vldrwq_gather_base_wb_z_f32(uint32x4_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_z_f32)))
+float32x4_t vldrwq_gather_base_z_f32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_f32)))
+float32x4_t vldrwq_gather_offset_f32(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_f32)))
+float32x4_t vldrwq_gather_offset(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32)))
+float32x4_t vldrwq_gather_offset_z_f32(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32)))
+float32x4_t vldrwq_gather_offset_z(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32)))
+float32x4_t vldrwq_gather_shifted_offset_f32(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32)))
+float32x4_t vldrwq_gather_shifted_offset(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32)))
+float32x4_t vldrwq_gather_shifted_offset_z_f32(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32)))
+float32x4_t vldrwq_gather_shifted_offset_z(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_z_f32)))
+float32x4_t vldrwq_z_f32(const float32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_f32)))
+float16x8_t vreinterpretq_f16_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_f32)))
+float16x8_t vreinterpretq_f16(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s16)))
+float16x8_t vreinterpretq_f16_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s16)))
+float16x8_t vreinterpretq_f16(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s32)))
+float16x8_t vreinterpretq_f16_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s32)))
+float16x8_t vreinterpretq_f16(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s64)))
+float16x8_t vreinterpretq_f16_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s64)))
+float16x8_t vreinterpretq_f16(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s8)))
+float16x8_t vreinterpretq_f16_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s8)))
+float16x8_t vreinterpretq_f16(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u16)))
+float16x8_t vreinterpretq_f16_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u16)))
+float16x8_t vreinterpretq_f16(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u32)))
+float16x8_t vreinterpretq_f16_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u32)))
+float16x8_t vreinterpretq_f16(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u64)))
+float16x8_t vreinterpretq_f16_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u64)))
+float16x8_t vreinterpretq_f16(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u8)))
+float16x8_t vreinterpretq_f16_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u8)))
+float16x8_t vreinterpretq_f16(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_f16)))
+float32x4_t vreinterpretq_f32_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_f16)))
+float32x4_t vreinterpretq_f32(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s16)))
+float32x4_t vreinterpretq_f32_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s16)))
+float32x4_t vreinterpretq_f32(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s32)))
+float32x4_t vreinterpretq_f32_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s32)))
+float32x4_t vreinterpretq_f32(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s64)))
+float32x4_t vreinterpretq_f32_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s64)))
+float32x4_t vreinterpretq_f32(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s8)))
+float32x4_t vreinterpretq_f32_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s8)))
+float32x4_t vreinterpretq_f32(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u16)))
+float32x4_t vreinterpretq_f32_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u16)))
+float32x4_t vreinterpretq_f32(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u32)))
+float32x4_t vreinterpretq_f32_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u32)))
+float32x4_t vreinterpretq_f32(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u64)))
+float32x4_t vreinterpretq_f32_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u64)))
+float32x4_t vreinterpretq_f32(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u8)))
+float32x4_t vreinterpretq_f32_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u8)))
+float32x4_t vreinterpretq_f32(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_f16)))
+int16x8_t vreinterpretq_s16_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_f16)))
+int16x8_t vreinterpretq_s16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_f32)))
+int16x8_t vreinterpretq_s16_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_f32)))
+int16x8_t vreinterpretq_s16(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_f16)))
+int32x4_t vreinterpretq_s32_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_f16)))
+int32x4_t vreinterpretq_s32(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_f32)))
+int32x4_t vreinterpretq_s32_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_f32)))
+int32x4_t vreinterpretq_s32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_f16)))
+int64x2_t vreinterpretq_s64_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_f16)))
+int64x2_t vreinterpretq_s64(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_f32)))
+int64x2_t vreinterpretq_s64_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_f32)))
+int64x2_t vreinterpretq_s64(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_f16)))
+int8x16_t vreinterpretq_s8_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_f16)))
+int8x16_t vreinterpretq_s8(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_f32)))
+int8x16_t vreinterpretq_s8_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_f32)))
+int8x16_t vreinterpretq_s8(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_f16)))
+uint16x8_t vreinterpretq_u16_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_f16)))
+uint16x8_t vreinterpretq_u16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_f32)))
+uint16x8_t vreinterpretq_u16_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_f32)))
+uint16x8_t vreinterpretq_u16(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_f16)))
+uint32x4_t vreinterpretq_u32_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_f16)))
+uint32x4_t vreinterpretq_u32(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_f32)))
+uint32x4_t vreinterpretq_u32_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_f32)))
+uint32x4_t vreinterpretq_u32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_f16)))
+uint64x2_t vreinterpretq_u64_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_f16)))
+uint64x2_t vreinterpretq_u64(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_f32)))
+uint64x2_t vreinterpretq_u64_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_f32)))
+uint64x2_t vreinterpretq_u64(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_f16)))
+uint8x16_t vreinterpretq_u8_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_f16)))
+uint8x16_t vreinterpretq_u8(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_f32)))
+uint8x16_t vreinterpretq_u8_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_f32)))
+uint8x16_t vreinterpretq_u8(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_f16)))
+float16x8_t vsetq_lane_f16(float16_t, float16x8_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_f16)))
+float16x8_t vsetq_lane(float16_t, float16x8_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_f32)))
+float32x4_t vsetq_lane_f32(float32_t, float32x4_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_f32)))
+float32x4_t vsetq_lane(float32_t, float32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_f16)))
+void vst1q_f16(float16_t *, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_f16)))
+void vst1q(float16_t *, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_f32)))
+void vst1q_f32(float32_t *, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_f32)))
+void vst1q(float32_t *, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_f16)))
+void vst1q_p_f16(float16_t *, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_f16)))
+void vst1q_p(float16_t *, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_f32)))
+void vst1q_p_f32(float32_t *, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_f32)))
+void vst1q_p(float32_t *, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_f16)))
+void vst2q_f16(float16_t *, float16x8x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_f16)))
+void vst2q(float16_t *, float16x8x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_f32)))
+void vst2q_f32(float32_t *, float32x4x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_f32)))
+void vst2q(float32_t *, float32x4x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_f16)))
+void vst4q_f16(float16_t *, float16x8x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_f16)))
+void vst4q(float16_t *, float16x8x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_f32)))
+void vst4q_f32(float32_t *, float32x4x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_f32)))
+void vst4q(float32_t *, float32x4x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_f16)))
+void vstrhq_f16(float16_t *, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_f16)))
+void vstrhq(float16_t *, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_f16)))
+void vstrhq_p_f16(float16_t *, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_f16)))
+void vstrhq_p(float16_t *, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16)))
+void vstrhq_scatter_offset_f16(float16_t *, uint16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16)))
+void vstrhq_scatter_offset(float16_t *, uint16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16)))
+void vstrhq_scatter_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16)))
+void vstrhq_scatter_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16)))
+void vstrhq_scatter_shifted_offset_f16(float16_t *, uint16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16)))
+void vstrhq_scatter_shifted_offset(float16_t *, uint16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16)))
+void vstrhq_scatter_shifted_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16)))
+void vstrhq_scatter_shifted_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_f32)))
+void vstrwq_f32(float32_t *, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_f32)))
+void vstrwq(float32_t *, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_f32)))
+void vstrwq_p_f32(float32_t *, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_f32)))
+void vstrwq_p(float32_t *, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_f32)))
+void vstrwq_scatter_base_f32(uint32x4_t, int, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_f32)))
+void vstrwq_scatter_base(uint32x4_t, int, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32)))
+void vstrwq_scatter_base_p_f32(uint32x4_t, int, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32)))
+void vstrwq_scatter_base_p(uint32x4_t, int, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32)))
+void vstrwq_scatter_base_wb_f32(uint32x4_t *, int, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32)))
+void vstrwq_scatter_base_wb(uint32x4_t *, int, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32)))
+void vstrwq_scatter_base_wb_p_f32(uint32x4_t *, int, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32)))
+void vstrwq_scatter_base_wb_p(uint32x4_t *, int, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32)))
+void vstrwq_scatter_offset_f32(float32_t *, uint32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32)))
+void vstrwq_scatter_offset(float32_t *, uint32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32)))
+void vstrwq_scatter_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32)))
+void vstrwq_scatter_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32)))
+void vstrwq_scatter_shifted_offset_f32(float32_t *, uint32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32)))
+void vstrwq_scatter_shifted_offset(float32_t *, uint32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32)))
+void vstrwq_scatter_shifted_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32)))
+void vstrwq_scatter_shifted_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_f16)))
+float16x8_t vsubq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_f16)))
+float16x8_t vsubq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_f32)))
+float32x4_t vsubq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_f32)))
+float32x4_t vsubq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_f16)))
+float16x8_t vsubq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_f16)))
+float16x8_t vsubq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_f32)))
+float32x4_t vsubq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_f32)))
+float32x4_t vsubq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_f16)))
+float16x8_t vuninitializedq_f16();
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_f32)))
+float32x4_t vuninitializedq_f32();
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f16)))
+float16x8_t vuninitializedq(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f32)))
+float32x4_t vuninitializedq(float32x4_t);
+
+#endif /* (__ARM_FEATURE_MVE & 2) && (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE) */
+
+#endif /* __ARM_MVE_H */
diff --git a/darwin-x86/lib64/clang/10.0.1/include/arm_neon.h b/darwin-x86/lib64/clang/10.0.5/include/arm_neon.h
similarity index 97%
rename from darwin-x86/lib64/clang/10.0.1/include/arm_neon.h
rename to darwin-x86/lib64/clang/10.0.5/include/arm_neon.h
index 6b55c4b..3c59d59 100644
--- a/darwin-x86/lib64/clang/10.0.1/include/arm_neon.h
+++ b/darwin-x86/lib64/clang/10.0.5/include/arm_neon.h
@@ -6086,7 +6086,7 @@
 #define vget_lane_p8(__p0, __p1) __extension__ ({ \
   poly8x8_t __s0 = __p0; \
   poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
+  __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -6094,13 +6094,13 @@
   poly8x8_t __s0 = __p0; \
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
+  __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vget_lane_p8(__p0, __p1) __extension__ ({ \
   poly8x8_t __s0 = __p0; \
   poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
+  __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -6109,7 +6109,7 @@
 #define vget_lane_p16(__p0, __p1) __extension__ ({ \
   poly16x4_t __s0 = __p0; \
   poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
+  __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -6117,13 +6117,13 @@
   poly16x4_t __s0 = __p0; \
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \
+  __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vget_lane_p16(__p0, __p1) __extension__ ({ \
   poly16x4_t __s0 = __p0; \
   poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
+  __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -6132,7 +6132,7 @@
 #define vgetq_lane_p8(__p0, __p1) __extension__ ({ \
   poly8x16_t __s0 = __p0; \
   poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
+  __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -6140,13 +6140,13 @@
   poly8x16_t __s0 = __p0; \
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
+  __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_p8(__p0, __p1) __extension__ ({ \
   poly8x16_t __s0 = __p0; \
   poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
+  __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -6155,7 +6155,7 @@
 #define vgetq_lane_p16(__p0, __p1) __extension__ ({ \
   poly16x8_t __s0 = __p0; \
   poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
+  __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -6163,13 +6163,13 @@
   poly16x8_t __s0 = __p0; \
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \
+  __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_p16(__p0, __p1) __extension__ ({ \
   poly16x8_t __s0 = __p0; \
   poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
+  __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -6201,7 +6201,7 @@
 #define vgetq_lane_u32(__p0, __p1) __extension__ ({ \
   uint32x4_t __s0 = __p0; \
   uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
+  __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -6209,13 +6209,13 @@
   uint32x4_t __s0 = __p0; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__rev0, __p1); \
+  __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_u32(__p0, __p1) __extension__ ({ \
   uint32x4_t __s0 = __p0; \
   uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
+  __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -6224,7 +6224,7 @@
 #define vgetq_lane_u64(__p0, __p1) __extension__ ({ \
   uint64x2_t __s0 = __p0; \
   uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
+  __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -6232,13 +6232,13 @@
   uint64x2_t __s0 = __p0; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__rev0, __p1); \
+  __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_u64(__p0, __p1) __extension__ ({ \
   uint64x2_t __s0 = __p0; \
   uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
+  __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -6247,7 +6247,7 @@
 #define vgetq_lane_u16(__p0, __p1) __extension__ ({ \
   uint16x8_t __s0 = __p0; \
   uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
+  __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -6255,13 +6255,13 @@
   uint16x8_t __s0 = __p0; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \
+  __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_u16(__p0, __p1) __extension__ ({ \
   uint16x8_t __s0 = __p0; \
   uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
+  __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -6293,7 +6293,7 @@
 #define vgetq_lane_f32(__p0, __p1) __extension__ ({ \
   float32x4_t __s0 = __p0; \
   float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vgetq_lane_f32((int8x16_t)__s0, __p1); \
+  __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -6301,13 +6301,13 @@
   float32x4_t __s0 = __p0; \
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vgetq_lane_f32((int8x16_t)__rev0, __p1); \
+  __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_f32(__p0, __p1) __extension__ ({ \
   float32x4_t __s0 = __p0; \
   float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vgetq_lane_f32((int8x16_t)__s0, __p1); \
+  __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -6316,7 +6316,7 @@
 #define vgetq_lane_s32(__p0, __p1) __extension__ ({ \
   int32x4_t __s0 = __p0; \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
+  __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -6324,13 +6324,13 @@
   int32x4_t __s0 = __p0; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__rev0, __p1); \
+  __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_s32(__p0, __p1) __extension__ ({ \
   int32x4_t __s0 = __p0; \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
+  __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -6339,7 +6339,7 @@
 #define vgetq_lane_s64(__p0, __p1) __extension__ ({ \
   int64x2_t __s0 = __p0; \
   int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
+  __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -6347,13 +6347,13 @@
   int64x2_t __s0 = __p0; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__rev0, __p1); \
+  __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_s64(__p0, __p1) __extension__ ({ \
   int64x2_t __s0 = __p0; \
   int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
+  __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -6362,7 +6362,7 @@
 #define vgetq_lane_s16(__p0, __p1) __extension__ ({ \
   int16x8_t __s0 = __p0; \
   int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
+  __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -6370,13 +6370,13 @@
   int16x8_t __s0 = __p0; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \
+  __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_s16(__p0, __p1) __extension__ ({ \
   int16x8_t __s0 = __p0; \
   int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
+  __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -6408,7 +6408,7 @@
 #define vget_lane_u32(__p0, __p1) __extension__ ({ \
   uint32x2_t __s0 = __p0; \
   uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
+  __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -6416,13 +6416,13 @@
   uint32x2_t __s0 = __p0; \
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vget_lane_i32((int8x8_t)__rev0, __p1); \
+  __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vget_lane_u32(__p0, __p1) __extension__ ({ \
   uint32x2_t __s0 = __p0; \
   uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
+  __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -6430,14 +6430,14 @@
 #define vget_lane_u64(__p0, __p1) __extension__ ({ \
   uint64x1_t __s0 = __p0; \
   uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
+  __ret = (uint64_t) __builtin_neon_vget_lane_i64((int64x1_t)__s0, __p1); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vget_lane_u16(__p0, __p1) __extension__ ({ \
   uint16x4_t __s0 = __p0; \
   uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
+  __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -6445,13 +6445,13 @@
   uint16x4_t __s0 = __p0; \
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \
+  __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vget_lane_u16(__p0, __p1) __extension__ ({ \
   uint16x4_t __s0 = __p0; \
   uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
+  __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -6483,7 +6483,7 @@
 #define vget_lane_f32(__p0, __p1) __extension__ ({ \
   float32x2_t __s0 = __p0; \
   float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vget_lane_f32((int8x8_t)__s0, __p1); \
+  __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -6491,13 +6491,13 @@
   float32x2_t __s0 = __p0; \
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vget_lane_f32((int8x8_t)__rev0, __p1); \
+  __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vget_lane_f32(__p0, __p1) __extension__ ({ \
   float32x2_t __s0 = __p0; \
   float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vget_lane_f32((int8x8_t)__s0, __p1); \
+  __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -6506,7 +6506,7 @@
 #define vget_lane_s32(__p0, __p1) __extension__ ({ \
   int32x2_t __s0 = __p0; \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
+  __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -6514,13 +6514,13 @@
   int32x2_t __s0 = __p0; \
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vget_lane_i32((int8x8_t)__rev0, __p1); \
+  __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vget_lane_s32(__p0, __p1) __extension__ ({ \
   int32x2_t __s0 = __p0; \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
+  __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -6528,14 +6528,14 @@
 #define vget_lane_s64(__p0, __p1) __extension__ ({ \
   int64x1_t __s0 = __p0; \
   int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
+  __ret = (int64_t) __builtin_neon_vget_lane_i64((int64x1_t)__s0, __p1); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vget_lane_s16(__p0, __p1) __extension__ ({ \
   int16x4_t __s0 = __p0; \
   int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
+  __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -6543,13 +6543,13 @@
   int16x4_t __s0 = __p0; \
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \
+  __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vget_lane_s16(__p0, __p1) __extension__ ({ \
   int16x4_t __s0 = __p0; \
   int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
+  __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -10034,7 +10034,7 @@
 #define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
   float32x4x2_t __s1 = __p1; \
   float32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 41); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 41); \
   __ret; \
 })
 #else
@@ -10044,7 +10044,7 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   float32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 41); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 41); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
@@ -10056,7 +10056,7 @@
 #define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
   int32x4x2_t __s1 = __p1; \
   int32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 34); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 34); \
   __ret; \
 })
 #else
@@ -10066,7 +10066,7 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   int32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 34); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 34); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
@@ -10078,7 +10078,7 @@
 #define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
   int16x8x2_t __s1 = __p1; \
   int16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 33); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 33); \
   __ret; \
 })
 #else
@@ -10088,7 +10088,7 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 33); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 33); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -10188,7 +10188,7 @@
 #define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
   float32x2x2_t __s1 = __p1; \
   float32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 9); \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 9); \
   __ret; \
 })
 #else
@@ -10198,7 +10198,7 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   float32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 9); \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 9); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
@@ -10210,7 +10210,7 @@
 #define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
   int32x2x2_t __s1 = __p1; \
   int32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 2); \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 2); \
   __ret; \
 })
 #else
@@ -10220,7 +10220,7 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   int32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 2); \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 2); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
@@ -10232,7 +10232,7 @@
 #define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
   int16x4x2_t __s1 = __p1; \
   int16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 1); \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 1); \
   __ret; \
 })
 #else
@@ -10242,7 +10242,7 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   int16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 1); \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 1); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
@@ -11078,7 +11078,7 @@
 #define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
   float32x4x3_t __s1 = __p1; \
   float32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 41); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 41); \
   __ret; \
 })
 #else
@@ -11089,7 +11089,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   float32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 41); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 41); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
@@ -11102,7 +11102,7 @@
 #define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
   int32x4x3_t __s1 = __p1; \
   int32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 34); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 34); \
   __ret; \
 })
 #else
@@ -11113,7 +11113,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   int32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 34); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 34); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
@@ -11126,7 +11126,7 @@
 #define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
   int16x8x3_t __s1 = __p1; \
   int16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 33); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 33); \
   __ret; \
 })
 #else
@@ -11137,7 +11137,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 33); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 33); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -11246,7 +11246,7 @@
 #define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
   float32x2x3_t __s1 = __p1; \
   float32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 9); \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 9); \
   __ret; \
 })
 #else
@@ -11257,7 +11257,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   float32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 9); \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 9); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
@@ -11270,7 +11270,7 @@
 #define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
   int32x2x3_t __s1 = __p1; \
   int32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 2); \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 2); \
   __ret; \
 })
 #else
@@ -11281,7 +11281,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   int32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 2); \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 2); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
@@ -11294,7 +11294,7 @@
 #define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
   int16x4x3_t __s1 = __p1; \
   int16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 1); \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 1); \
   __ret; \
 })
 #else
@@ -11305,7 +11305,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   int16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 1); \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 1); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
@@ -12190,7 +12190,7 @@
 #define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
   float32x4x4_t __s1 = __p1; \
   float32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 41); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 41); \
   __ret; \
 })
 #else
@@ -12202,7 +12202,7 @@
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
   float32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 41); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 41); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
@@ -12216,7 +12216,7 @@
 #define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
   int32x4x4_t __s1 = __p1; \
   int32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 34); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 34); \
   __ret; \
 })
 #else
@@ -12228,7 +12228,7 @@
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
   int32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 34); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 34); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
@@ -12242,7 +12242,7 @@
 #define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
   int16x8x4_t __s1 = __p1; \
   int16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 33); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 33); \
   __ret; \
 })
 #else
@@ -12254,7 +12254,7 @@
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 33); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 33); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -12372,7 +12372,7 @@
 #define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
   float32x2x4_t __s1 = __p1; \
   float32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 9); \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 9); \
   __ret; \
 })
 #else
@@ -12384,7 +12384,7 @@
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
   float32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 9); \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 9); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
@@ -12398,7 +12398,7 @@
 #define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
   int32x2x4_t __s1 = __p1; \
   int32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 2); \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 2); \
   __ret; \
 })
 #else
@@ -12410,7 +12410,7 @@
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
   int32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 2); \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 2); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
@@ -12424,7 +12424,7 @@
 #define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
   int16x4x4_t __s1 = __p1; \
   int16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 1); \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 1); \
   __ret; \
 })
 #else
@@ -12436,7 +12436,7 @@
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
   int16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 1); \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 1); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
@@ -15725,20 +15725,20 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
   uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint32x2_t) {__p1, __p1}, 51);
+  __ret = vmull_u32(__p0, (uint32x2_t) {__p1, __p1});
   return __ret;
 }
 #else
 __ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(uint32x2_t) {__p1, __p1}, 51);
+  __ret = __noswap_vmull_u32(__rev0, (uint32x2_t) {__p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
 }
 __ai uint64x2_t __noswap_vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
   uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint32x2_t) {__p1, __p1}, 51);
+  __ret = __noswap_vmull_u32(__p0, (uint32x2_t) {__p1, __p1});
   return __ret;
 }
 #endif
@@ -15746,20 +15746,20 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
   uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint16x4_t) {__p1, __p1, __p1, __p1}, 50);
+  __ret = vmull_u16(__p0, (uint16x4_t) {__p1, __p1, __p1, __p1});
   return __ret;
 }
 #else
 __ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(uint16x4_t) {__p1, __p1, __p1, __p1}, 50);
+  __ret = __noswap_vmull_u16(__rev0, (uint16x4_t) {__p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
 __ai uint32x4_t __noswap_vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
   uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint16x4_t) {__p1, __p1, __p1, __p1}, 50);
+  __ret = __noswap_vmull_u16(__p0, (uint16x4_t) {__p1, __p1, __p1, __p1});
   return __ret;
 }
 #endif
@@ -15767,20 +15767,20 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) {
   int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
+  __ret = vmull_s32(__p0, (int32x2_t) {__p1, __p1});
   return __ret;
 }
 #else
 __ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) {
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
+  __ret = __noswap_vmull_s32(__rev0, (int32x2_t) {__p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
 }
 __ai int64x2_t __noswap_vmull_n_s32(int32x2_t __p0, int32_t __p1) {
   int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
+  __ret = __noswap_vmull_s32(__p0, (int32x2_t) {__p1, __p1});
   return __ret;
 }
 #endif
@@ -15788,20 +15788,20 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) {
   int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
+  __ret = vmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
   return __ret;
 }
 #else
 __ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) {
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
+  __ret = __noswap_vmull_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
 __ai int32x4_t __noswap_vmull_n_s16(int16x4_t __p0, int16_t __p1) {
   int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
+  __ret = __noswap_vmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
   return __ret;
 }
 #endif
@@ -17862,7 +17862,7 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
   int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
+  __ret = vqdmlal_s32(__p0, __p1, (int32x2_t) {__p2, __p2});
   return __ret;
 }
 #else
@@ -17870,13 +17870,13 @@
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
+  __ret = __noswap_vqdmlal_s32(__rev0, __rev1, (int32x2_t) {__p2, __p2});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
 }
 __ai int64x2_t __noswap_vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
   int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
+  __ret = __noswap_vqdmlal_s32(__p0, __p1, (int32x2_t) {__p2, __p2});
   return __ret;
 }
 #endif
@@ -17884,7 +17884,7 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
   int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
+  __ret = vqdmlal_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2});
   return __ret;
 }
 #else
@@ -17892,13 +17892,13 @@
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
+  __ret = __noswap_vqdmlal_s16(__rev0, __rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
 __ai int32x4_t __noswap_vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
   int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
+  __ret = __noswap_vqdmlal_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2});
   return __ret;
 }
 #endif
@@ -18000,7 +18000,7 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
   int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
+  __ret = vqdmlsl_s32(__p0, __p1, (int32x2_t) {__p2, __p2});
   return __ret;
 }
 #else
@@ -18008,13 +18008,13 @@
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
+  __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, (int32x2_t) {__p2, __p2});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
 }
 __ai int64x2_t __noswap_vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
   int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
+  __ret = __noswap_vqdmlsl_s32(__p0, __p1, (int32x2_t) {__p2, __p2});
   return __ret;
 }
 #endif
@@ -18022,7 +18022,7 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
   int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
+  __ret = vqdmlsl_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2});
   return __ret;
 }
 #else
@@ -18030,13 +18030,13 @@
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
+  __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
 __ai int32x4_t __noswap_vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
   int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
+  __ret = __noswap_vqdmlsl_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2});
   return __ret;
 }
 #endif
@@ -18216,14 +18216,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
   int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
+  __ret = vqdmulhq_s32(__p0, (int32x4_t) {__p1, __p1, __p1, __p1});
   return __ret;
 }
 #else
 __ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
+  __ret = __noswap_vqdmulhq_s32(__rev0, (int32x4_t) {__p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
@@ -18232,14 +18232,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
   int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
+  __ret = vqdmulhq_s16(__p0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1});
   return __ret;
 }
 #else
 __ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
+  __ret = __noswap_vqdmulhq_s16(__rev0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
 }
@@ -18248,14 +18248,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
   int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
+  __ret = vqdmulh_s32(__p0, (int32x2_t) {__p1, __p1});
   return __ret;
 }
 #else
 __ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
+  __ret = __noswap_vqdmulh_s32(__rev0, (int32x2_t) {__p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
 }
@@ -18264,14 +18264,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
   int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
+  __ret = vqdmulh_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
   return __ret;
 }
 #else
 __ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
+  __ret = __noswap_vqdmulh_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
@@ -18366,20 +18366,20 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
   int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
+  __ret = vqdmull_s32(__p0, (int32x2_t) {__p1, __p1});
   return __ret;
 }
 #else
 __ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
+  __ret = __noswap_vqdmull_s32(__rev0, (int32x2_t) {__p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
 }
 __ai int64x2_t __noswap_vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
   int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
+  __ret = __noswap_vqdmull_s32(__p0, (int32x2_t) {__p1, __p1});
   return __ret;
 }
 #endif
@@ -18387,20 +18387,20 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
   int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
+  __ret = vqdmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
   return __ret;
 }
 #else
 __ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
+  __ret = __noswap_vqdmull_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
 __ai int32x4_t __noswap_vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
   int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
+  __ret = __noswap_vqdmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
   return __ret;
 }
 #endif
@@ -18865,14 +18865,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
   int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
+  __ret = vqrdmulhq_s32(__p0, (int32x4_t) {__p1, __p1, __p1, __p1});
   return __ret;
 }
 #else
 __ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
+  __ret = __noswap_vqrdmulhq_s32(__rev0, (int32x4_t) {__p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
@@ -18881,14 +18881,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
   int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
+  __ret = vqrdmulhq_s16(__p0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1});
   return __ret;
 }
 #else
 __ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
+  __ret = __noswap_vqrdmulhq_s16(__rev0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
 }
@@ -18897,14 +18897,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
   int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
+  __ret = vqrdmulh_s32(__p0, (int32x2_t) {__p1, __p1});
   return __ret;
 }
 #else
 __ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
+  __ret = __noswap_vqrdmulh_s32(__rev0, (int32x2_t) {__p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
 }
@@ -18913,14 +18913,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
   int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
+  __ret = vqrdmulh_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
   return __ret;
 }
 #else
 __ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
+  __ret = __noswap_vqrdmulh_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
@@ -22727,7 +22727,7 @@
   poly8_t __s0 = __p0; \
   poly8x8_t __s1 = __p1; \
   poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -22736,7 +22736,7 @@
   poly8x8_t __s1 = __p1; \
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
+  __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
@@ -22744,7 +22744,7 @@
   poly8_t __s0 = __p0; \
   poly8x8_t __s1 = __p1; \
   poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -22754,7 +22754,7 @@
   poly16_t __s0 = __p0; \
   poly16x4_t __s1 = __p1; \
   poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -22763,7 +22763,7 @@
   poly16x4_t __s1 = __p1; \
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
   poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \
+  __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
@@ -22771,7 +22771,7 @@
   poly16_t __s0 = __p0; \
   poly16x4_t __s1 = __p1; \
   poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -22781,7 +22781,7 @@
   poly8_t __s0 = __p0; \
   poly8x16_t __s1 = __p1; \
   poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -22790,7 +22790,7 @@
   poly8x16_t __s1 = __p1; \
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
+  __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
@@ -22798,7 +22798,7 @@
   poly8_t __s0 = __p0; \
   poly8x16_t __s1 = __p1; \
   poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -22808,7 +22808,7 @@
   poly16_t __s0 = __p0; \
   poly16x8_t __s1 = __p1; \
   poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -22817,7 +22817,7 @@
   poly16x8_t __s1 = __p1; \
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \
+  __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
@@ -22825,7 +22825,7 @@
   poly16_t __s0 = __p0; \
   poly16x8_t __s1 = __p1; \
   poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -22862,7 +22862,7 @@
   uint32_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
   uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -22871,7 +22871,7 @@
   uint32x4_t __s1 = __p1; \
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
   uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__rev1, __p2); \
+  __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
@@ -22879,7 +22879,7 @@
   uint32_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
   uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -22889,7 +22889,7 @@
   uint64_t __s0 = __p0; \
   uint64x2_t __s1 = __p1; \
   uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -22898,7 +22898,7 @@
   uint64x2_t __s1 = __p1; \
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
   uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__rev1, __p2); \
+  __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
@@ -22906,7 +22906,7 @@
   uint64_t __s0 = __p0; \
   uint64x2_t __s1 = __p1; \
   uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -22916,7 +22916,7 @@
   uint16_t __s0 = __p0; \
   uint16x8_t __s1 = __p1; \
   uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -22925,7 +22925,7 @@
   uint16x8_t __s1 = __p1; \
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \
+  __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
@@ -22933,7 +22933,7 @@
   uint16_t __s0 = __p0; \
   uint16x8_t __s1 = __p1; \
   uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -22970,7 +22970,7 @@
   float32_t __s0 = __p0; \
   float32x4_t __s1 = __p1; \
   float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -22979,7 +22979,7 @@
   float32x4_t __s1 = __p1; \
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
   float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (int8x16_t)__rev1, __p2); \
+  __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
@@ -22987,7 +22987,7 @@
   float32_t __s0 = __p0; \
   float32x4_t __s1 = __p1; \
   float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -22997,7 +22997,7 @@
   int32_t __s0 = __p0; \
   int32x4_t __s1 = __p1; \
   int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -23006,7 +23006,7 @@
   int32x4_t __s1 = __p1; \
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
   int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__rev1, __p2); \
+  __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
@@ -23014,7 +23014,7 @@
   int32_t __s0 = __p0; \
   int32x4_t __s1 = __p1; \
   int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -23024,7 +23024,7 @@
   int64_t __s0 = __p0; \
   int64x2_t __s1 = __p1; \
   int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -23033,7 +23033,7 @@
   int64x2_t __s1 = __p1; \
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
   int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__rev1, __p2); \
+  __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
@@ -23041,7 +23041,7 @@
   int64_t __s0 = __p0; \
   int64x2_t __s1 = __p1; \
   int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -23051,7 +23051,7 @@
   int16_t __s0 = __p0; \
   int16x8_t __s1 = __p1; \
   int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -23060,7 +23060,7 @@
   int16x8_t __s1 = __p1; \
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \
+  __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
@@ -23068,7 +23068,7 @@
   int16_t __s0 = __p0; \
   int16x8_t __s1 = __p1; \
   int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -23105,7 +23105,7 @@
   uint32_t __s0 = __p0; \
   uint32x2_t __s1 = __p1; \
   uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -23114,7 +23114,7 @@
   uint32x2_t __s1 = __p1; \
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
   uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__rev1, __p2); \
+  __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
@@ -23122,7 +23122,7 @@
   uint32_t __s0 = __p0; \
   uint32x2_t __s1 = __p1; \
   uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -23131,7 +23131,7 @@
   uint64_t __s0 = __p0; \
   uint64x1_t __s1 = __p1; \
   uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int64x1_t)__s1, __p2); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
@@ -23139,7 +23139,7 @@
   uint16_t __s0 = __p0; \
   uint16x4_t __s1 = __p1; \
   uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -23148,7 +23148,7 @@
   uint16x4_t __s1 = __p1; \
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
   uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \
+  __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
@@ -23156,7 +23156,7 @@
   uint16_t __s0 = __p0; \
   uint16x4_t __s1 = __p1; \
   uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -23193,7 +23193,7 @@
   float32_t __s0 = __p0; \
   float32x2_t __s1 = __p1; \
   float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -23202,7 +23202,7 @@
   float32x2_t __s1 = __p1; \
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
   float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (int8x8_t)__rev1, __p2); \
+  __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
@@ -23210,7 +23210,7 @@
   float32_t __s0 = __p0; \
   float32x2_t __s1 = __p1; \
   float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -23220,7 +23220,7 @@
   int32_t __s0 = __p0; \
   int32x2_t __s1 = __p1; \
   int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -23229,7 +23229,7 @@
   int32x2_t __s1 = __p1; \
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
   int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__rev1, __p2); \
+  __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
@@ -23237,7 +23237,7 @@
   int32_t __s0 = __p0; \
   int32x2_t __s1 = __p1; \
   int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -23246,7 +23246,7 @@
   int64_t __s0 = __p0; \
   int64x1_t __s1 = __p1; \
   int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int64x1_t)__s1, __p2); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
@@ -23254,7 +23254,7 @@
   int16_t __s0 = __p0; \
   int16x4_t __s1 = __p1; \
   int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -23263,7 +23263,7 @@
   int16x4_t __s1 = __p1; \
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
   int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \
+  __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
@@ -23271,7 +23271,7 @@
   int16_t __s0 = __p0; \
   int16x4_t __s1 = __p1; \
   int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -26106,7 +26106,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_f32_x2(__p0, __p1) __extension__ ({ \
   float32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 41); \
+  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 41); \
 })
 #else
 #define vst1q_f32_x2(__p0, __p1) __extension__ ({ \
@@ -26114,14 +26114,14 @@
   float32x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 41); \
+  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 41); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_s32_x2(__p0, __p1) __extension__ ({ \
   int32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 34); \
+  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 34); \
 })
 #else
 #define vst1q_s32_x2(__p0, __p1) __extension__ ({ \
@@ -26129,14 +26129,14 @@
   int32x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 34); \
+  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 34); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_s64_x2(__p0, __p1) __extension__ ({ \
   int64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 35); \
+  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 35); \
 })
 #else
 #define vst1q_s64_x2(__p0, __p1) __extension__ ({ \
@@ -26144,14 +26144,14 @@
   int64x2x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 35); \
+  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 35); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_s16_x2(__p0, __p1) __extension__ ({ \
   int16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 33); \
+  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 33); \
 })
 #else
 #define vst1q_s16_x2(__p0, __p1) __extension__ ({ \
@@ -26159,7 +26159,7 @@
   int16x8x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 33); \
+  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 33); \
 })
 #endif
 
@@ -26230,7 +26230,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst1_f32_x2(__p0, __p1) __extension__ ({ \
   float32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 9); \
+  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 9); \
 })
 #else
 #define vst1_f32_x2(__p0, __p1) __extension__ ({ \
@@ -26238,14 +26238,14 @@
   float32x2x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 9); \
+  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 9); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst1_s32_x2(__p0, __p1) __extension__ ({ \
   int32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 2); \
+  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 2); \
 })
 #else
 #define vst1_s32_x2(__p0, __p1) __extension__ ({ \
@@ -26253,18 +26253,18 @@
   int32x2x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 2); \
+  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 2); \
 })
 #endif
 
 #define vst1_s64_x2(__p0, __p1) __extension__ ({ \
   int64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 3); \
+  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 3); \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vst1_s16_x2(__p0, __p1) __extension__ ({ \
   int16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 1); \
+  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 1); \
 })
 #else
 #define vst1_s16_x2(__p0, __p1) __extension__ ({ \
@@ -26272,7 +26272,7 @@
   int16x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 1); \
+  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 1); \
 })
 #endif
 
@@ -26423,7 +26423,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_f32_x3(__p0, __p1) __extension__ ({ \
   float32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 41); \
+  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 41); \
 })
 #else
 #define vst1q_f32_x3(__p0, __p1) __extension__ ({ \
@@ -26432,14 +26432,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 41); \
+  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 41); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_s32_x3(__p0, __p1) __extension__ ({ \
   int32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 34); \
+  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 34); \
 })
 #else
 #define vst1q_s32_x3(__p0, __p1) __extension__ ({ \
@@ -26448,14 +26448,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 34); \
+  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 34); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_s64_x3(__p0, __p1) __extension__ ({ \
   int64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 35); \
+  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 35); \
 })
 #else
 #define vst1q_s64_x3(__p0, __p1) __extension__ ({ \
@@ -26464,14 +26464,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 35); \
+  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 35); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_s16_x3(__p0, __p1) __extension__ ({ \
   int16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 33); \
+  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 33); \
 })
 #else
 #define vst1q_s16_x3(__p0, __p1) __extension__ ({ \
@@ -26480,7 +26480,7 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 33); \
+  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 33); \
 })
 #endif
 
@@ -26555,7 +26555,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst1_f32_x3(__p0, __p1) __extension__ ({ \
   float32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 9); \
+  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 9); \
 })
 #else
 #define vst1_f32_x3(__p0, __p1) __extension__ ({ \
@@ -26564,14 +26564,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 9); \
+  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 9); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst1_s32_x3(__p0, __p1) __extension__ ({ \
   int32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 2); \
+  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 2); \
 })
 #else
 #define vst1_s32_x3(__p0, __p1) __extension__ ({ \
@@ -26580,18 +26580,18 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 2); \
+  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 2); \
 })
 #endif
 
 #define vst1_s64_x3(__p0, __p1) __extension__ ({ \
   int64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \
+  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 3); \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vst1_s16_x3(__p0, __p1) __extension__ ({ \
   int16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 1); \
+  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 1); \
 })
 #else
 #define vst1_s16_x3(__p0, __p1) __extension__ ({ \
@@ -26600,7 +26600,7 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 1); \
+  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 1); \
 })
 #endif
 
@@ -26760,7 +26760,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_f32_x4(__p0, __p1) __extension__ ({ \
   float32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 41); \
+  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 41); \
 })
 #else
 #define vst1q_f32_x4(__p0, __p1) __extension__ ({ \
@@ -26770,14 +26770,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 41); \
+  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 41); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_s32_x4(__p0, __p1) __extension__ ({ \
   int32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 34); \
+  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 34); \
 })
 #else
 #define vst1q_s32_x4(__p0, __p1) __extension__ ({ \
@@ -26787,14 +26787,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 34); \
+  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 34); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_s64_x4(__p0, __p1) __extension__ ({ \
   int64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 35); \
+  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 35); \
 })
 #else
 #define vst1q_s64_x4(__p0, __p1) __extension__ ({ \
@@ -26804,14 +26804,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 35); \
+  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 35); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_s16_x4(__p0, __p1) __extension__ ({ \
   int16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 33); \
+  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 33); \
 })
 #else
 #define vst1q_s16_x4(__p0, __p1) __extension__ ({ \
@@ -26821,7 +26821,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 33); \
+  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 33); \
 })
 #endif
 
@@ -26900,7 +26900,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst1_f32_x4(__p0, __p1) __extension__ ({ \
   float32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 9); \
+  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 9); \
 })
 #else
 #define vst1_f32_x4(__p0, __p1) __extension__ ({ \
@@ -26910,14 +26910,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 9); \
+  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 9); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst1_s32_x4(__p0, __p1) __extension__ ({ \
   int32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 2); \
+  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 2); \
 })
 #else
 #define vst1_s32_x4(__p0, __p1) __extension__ ({ \
@@ -26927,18 +26927,18 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 2); \
+  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 2); \
 })
 #endif
 
 #define vst1_s64_x4(__p0, __p1) __extension__ ({ \
   int64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \
+  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 3); \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vst1_s16_x4(__p0, __p1) __extension__ ({ \
   int16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 1); \
+  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 1); \
 })
 #else
 #define vst1_s16_x4(__p0, __p1) __extension__ ({ \
@@ -26948,7 +26948,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 1); \
+  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 1); \
 })
 #endif
 
@@ -27075,7 +27075,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst2q_f32(__p0, __p1) __extension__ ({ \
   float32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 41); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 41); \
 })
 #else
 #define vst2q_f32(__p0, __p1) __extension__ ({ \
@@ -27083,14 +27083,14 @@
   float32x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 41); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 41); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst2q_s32(__p0, __p1) __extension__ ({ \
   int32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 34); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 34); \
 })
 #else
 #define vst2q_s32(__p0, __p1) __extension__ ({ \
@@ -27098,14 +27098,14 @@
   int32x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 34); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 34); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst2q_s16(__p0, __p1) __extension__ ({ \
   int16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 33); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 33); \
 })
 #else
 #define vst2q_s16(__p0, __p1) __extension__ ({ \
@@ -27113,7 +27113,7 @@
   int16x8x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 33); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 33); \
 })
 #endif
 
@@ -27184,7 +27184,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst2_f32(__p0, __p1) __extension__ ({ \
   float32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 9); \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 9); \
 })
 #else
 #define vst2_f32(__p0, __p1) __extension__ ({ \
@@ -27192,14 +27192,14 @@
   float32x2x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 9); \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 9); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst2_s32(__p0, __p1) __extension__ ({ \
   int32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 2); \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 2); \
 })
 #else
 #define vst2_s32(__p0, __p1) __extension__ ({ \
@@ -27207,18 +27207,18 @@
   int32x2x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 2); \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 2); \
 })
 #endif
 
 #define vst2_s64(__p0, __p1) __extension__ ({ \
   int64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 3); \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 3); \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vst2_s16(__p0, __p1) __extension__ ({ \
   int16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 1); \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 1); \
 })
 #else
 #define vst2_s16(__p0, __p1) __extension__ ({ \
@@ -27226,7 +27226,7 @@
   int16x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 1); \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 1); \
 })
 #endif
 
@@ -27308,7 +27308,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
   float32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 41); \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 41); \
 })
 #else
 #define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
@@ -27316,14 +27316,14 @@
   float32x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 41); \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 41); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
   int32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 34); \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 34); \
 })
 #else
 #define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
@@ -27331,14 +27331,14 @@
   int32x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 34); \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 34); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
   int16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 33); \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 33); \
 })
 #else
 #define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
@@ -27346,7 +27346,7 @@
   int16x8x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 33); \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 33); \
 })
 #endif
 
@@ -27413,7 +27413,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
   float32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 9); \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 9); \
 })
 #else
 #define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
@@ -27421,14 +27421,14 @@
   float32x2x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 9); \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 9); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
   int32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 2); \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 2); \
 })
 #else
 #define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
@@ -27436,14 +27436,14 @@
   int32x2x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 2); \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 2); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
   int16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 1); \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 1); \
 })
 #else
 #define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
@@ -27451,7 +27451,7 @@
   int16x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 1); \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 1); \
 })
 #endif
 
@@ -27586,7 +27586,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst3q_f32(__p0, __p1) __extension__ ({ \
   float32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 41); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 41); \
 })
 #else
 #define vst3q_f32(__p0, __p1) __extension__ ({ \
@@ -27595,14 +27595,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 41); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 41); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst3q_s32(__p0, __p1) __extension__ ({ \
   int32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 34); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 34); \
 })
 #else
 #define vst3q_s32(__p0, __p1) __extension__ ({ \
@@ -27611,14 +27611,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 34); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 34); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst3q_s16(__p0, __p1) __extension__ ({ \
   int16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 33); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 33); \
 })
 #else
 #define vst3q_s16(__p0, __p1) __extension__ ({ \
@@ -27627,7 +27627,7 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 33); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 33); \
 })
 #endif
 
@@ -27702,7 +27702,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst3_f32(__p0, __p1) __extension__ ({ \
   float32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 9); \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 9); \
 })
 #else
 #define vst3_f32(__p0, __p1) __extension__ ({ \
@@ -27711,14 +27711,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 9); \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 9); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst3_s32(__p0, __p1) __extension__ ({ \
   int32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 2); \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 2); \
 })
 #else
 #define vst3_s32(__p0, __p1) __extension__ ({ \
@@ -27727,18 +27727,18 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 2); \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 2); \
 })
 #endif
 
 #define vst3_s64(__p0, __p1) __extension__ ({ \
   int64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 3); \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vst3_s16(__p0, __p1) __extension__ ({ \
   int16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 1); \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 1); \
 })
 #else
 #define vst3_s16(__p0, __p1) __extension__ ({ \
@@ -27747,7 +27747,7 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 1); \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 1); \
 })
 #endif
 
@@ -27834,7 +27834,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
   float32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 41); \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 41); \
 })
 #else
 #define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
@@ -27843,14 +27843,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 41); \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 41); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
   int32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 34); \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 34); \
 })
 #else
 #define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
@@ -27859,14 +27859,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 34); \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 34); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
   int16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 33); \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 33); \
 })
 #else
 #define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
@@ -27875,7 +27875,7 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 33); \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 33); \
 })
 #endif
 
@@ -27946,7 +27946,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
   float32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 9); \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 9); \
 })
 #else
 #define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
@@ -27955,14 +27955,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 9); \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 9); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
   int32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 2); \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 2); \
 })
 #else
 #define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
@@ -27971,14 +27971,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 2); \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 2); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
   int16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 1); \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 1); \
 })
 #else
 #define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
@@ -27987,7 +27987,7 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 1); \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 1); \
 })
 #endif
 
@@ -28130,7 +28130,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst4q_f32(__p0, __p1) __extension__ ({ \
   float32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 41); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 41); \
 })
 #else
 #define vst4q_f32(__p0, __p1) __extension__ ({ \
@@ -28140,14 +28140,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 41); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 41); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst4q_s32(__p0, __p1) __extension__ ({ \
   int32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 34); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 34); \
 })
 #else
 #define vst4q_s32(__p0, __p1) __extension__ ({ \
@@ -28157,14 +28157,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 34); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 34); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst4q_s16(__p0, __p1) __extension__ ({ \
   int16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 33); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 33); \
 })
 #else
 #define vst4q_s16(__p0, __p1) __extension__ ({ \
@@ -28174,7 +28174,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 33); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 33); \
 })
 #endif
 
@@ -28253,7 +28253,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst4_f32(__p0, __p1) __extension__ ({ \
   float32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 9); \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 9); \
 })
 #else
 #define vst4_f32(__p0, __p1) __extension__ ({ \
@@ -28263,14 +28263,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 9); \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 9); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst4_s32(__p0, __p1) __extension__ ({ \
   int32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 2); \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 2); \
 })
 #else
 #define vst4_s32(__p0, __p1) __extension__ ({ \
@@ -28280,18 +28280,18 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 2); \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 2); \
 })
 #endif
 
 #define vst4_s64(__p0, __p1) __extension__ ({ \
   int64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 3); \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vst4_s16(__p0, __p1) __extension__ ({ \
   int16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 1); \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 1); \
 })
 #else
 #define vst4_s16(__p0, __p1) __extension__ ({ \
@@ -28301,7 +28301,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 1); \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 1); \
 })
 #endif
 
@@ -28393,7 +28393,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
   float32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 41); \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 41); \
 })
 #else
 #define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
@@ -28403,14 +28403,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 41); \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 41); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
   int32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 34); \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 34); \
 })
 #else
 #define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
@@ -28420,14 +28420,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 34); \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 34); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
   int16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 33); \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 33); \
 })
 #else
 #define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
@@ -28437,7 +28437,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 33); \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 33); \
 })
 #endif
 
@@ -28512,7 +28512,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
   float32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 9); \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 9); \
 })
 #else
 #define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
@@ -28522,14 +28522,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 9); \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 9); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
   int32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 2); \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 2); \
 })
 #else
 #define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
@@ -28539,14 +28539,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 2); \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 2); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
   int16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 1); \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 1); \
 })
 #else
 #define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
@@ -28556,7 +28556,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 1); \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 1); \
 })
 #endif
 
@@ -32695,7 +32695,7 @@
 #define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
   float16x8x2_t __s1 = __p1; \
   float16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 40); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 40); \
   __ret; \
 })
 #else
@@ -32705,7 +32705,7 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   float16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 40); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 40); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -32717,7 +32717,7 @@
 #define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
   float16x4x2_t __s1 = __p1; \
   float16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 8); \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 8); \
   __ret; \
 })
 #else
@@ -32727,7 +32727,7 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   float16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 8); \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 8); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
@@ -32811,7 +32811,7 @@
 #define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
   float16x8x3_t __s1 = __p1; \
   float16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 40); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 40); \
   __ret; \
 })
 #else
@@ -32822,7 +32822,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
   float16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 40); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 40); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -32835,7 +32835,7 @@
 #define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
   float16x4x3_t __s1 = __p1; \
   float16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 8); \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 8); \
   __ret; \
 })
 #else
@@ -32846,7 +32846,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   float16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 8); \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 8); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
@@ -32935,7 +32935,7 @@
 #define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
   float16x8x4_t __s1 = __p1; \
   float16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 40); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 40); \
   __ret; \
 })
 #else
@@ -32947,7 +32947,7 @@
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
   float16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 40); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 40); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -32961,7 +32961,7 @@
 #define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
   float16x4x4_t __s1 = __p1; \
   float16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 8); \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 8); \
   __ret; \
 })
 #else
@@ -32973,7 +32973,7 @@
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
   float16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 8); \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 8); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
@@ -33038,7 +33038,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_f16_x2(__p0, __p1) __extension__ ({ \
   float16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 40); \
+  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 40); \
 })
 #else
 #define vst1q_f16_x2(__p0, __p1) __extension__ ({ \
@@ -33046,14 +33046,14 @@
   float16x8x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 40); \
+  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 40); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst1_f16_x2(__p0, __p1) __extension__ ({ \
   float16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 8); \
+  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 8); \
 })
 #else
 #define vst1_f16_x2(__p0, __p1) __extension__ ({ \
@@ -33061,14 +33061,14 @@
   float16x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 8); \
+  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 8); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_f16_x3(__p0, __p1) __extension__ ({ \
   float16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 40); \
+  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 40); \
 })
 #else
 #define vst1q_f16_x3(__p0, __p1) __extension__ ({ \
@@ -33077,14 +33077,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 40); \
+  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 40); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst1_f16_x3(__p0, __p1) __extension__ ({ \
   float16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 8); \
+  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 8); \
 })
 #else
 #define vst1_f16_x3(__p0, __p1) __extension__ ({ \
@@ -33093,14 +33093,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 8); \
+  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 8); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_f16_x4(__p0, __p1) __extension__ ({ \
   float16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 40); \
+  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 40); \
 })
 #else
 #define vst1q_f16_x4(__p0, __p1) __extension__ ({ \
@@ -33110,14 +33110,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 40); \
+  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 40); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst1_f16_x4(__p0, __p1) __extension__ ({ \
   float16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 8); \
+  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 8); \
 })
 #else
 #define vst1_f16_x4(__p0, __p1) __extension__ ({ \
@@ -33127,14 +33127,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 8); \
+  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 8); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst2q_f16(__p0, __p1) __extension__ ({ \
   float16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 40); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 40); \
 })
 #else
 #define vst2q_f16(__p0, __p1) __extension__ ({ \
@@ -33142,14 +33142,14 @@
   float16x8x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 40); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 40); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst2_f16(__p0, __p1) __extension__ ({ \
   float16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 8); \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 8); \
 })
 #else
 #define vst2_f16(__p0, __p1) __extension__ ({ \
@@ -33157,14 +33157,14 @@
   float16x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 8); \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 8); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
   float16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 40); \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 40); \
 })
 #else
 #define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
@@ -33172,14 +33172,14 @@
   float16x8x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 40); \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 40); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
   float16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 8); \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 8); \
 })
 #else
 #define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
@@ -33187,14 +33187,14 @@
   float16x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 8); \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 8); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst3q_f16(__p0, __p1) __extension__ ({ \
   float16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 40); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 40); \
 })
 #else
 #define vst3q_f16(__p0, __p1) __extension__ ({ \
@@ -33203,14 +33203,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 40); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 40); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst3_f16(__p0, __p1) __extension__ ({ \
   float16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 8); \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 8); \
 })
 #else
 #define vst3_f16(__p0, __p1) __extension__ ({ \
@@ -33219,14 +33219,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 8); \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 8); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
   float16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 40); \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 40); \
 })
 #else
 #define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
@@ -33235,14 +33235,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 40); \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 40); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
   float16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 8); \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 8); \
 })
 #else
 #define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
@@ -33251,14 +33251,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 8); \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 8); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst4q_f16(__p0, __p1) __extension__ ({ \
   float16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 40); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 40); \
 })
 #else
 #define vst4q_f16(__p0, __p1) __extension__ ({ \
@@ -33268,14 +33268,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 40); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 40); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst4_f16(__p0, __p1) __extension__ ({ \
   float16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 8); \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 8); \
 })
 #else
 #define vst4_f16(__p0, __p1) __extension__ ({ \
@@ -33285,14 +33285,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 8); \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 8); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
   float16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 40); \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 40); \
 })
 #else
 #define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
@@ -33302,14 +33302,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 40); \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 40); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
   float16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 8); \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 8); \
 })
 #else
 #define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
@@ -33319,7 +33319,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 8); \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 8); \
 })
 #endif
 
@@ -33652,7 +33652,7 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
   uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32((int8x16_t)__p0, __p1, (int8x16_t)__p2);
+  __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32(__p0, __p1, __p2);
   return __ret;
 }
 #else
@@ -33660,7 +33660,7 @@
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
   uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32((int8x16_t)__rev0, __p1, (int8x16_t)__rev2);
+  __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32(__rev0, __p1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
@@ -33674,7 +33674,7 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
   uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32((int8x16_t)__p0, __p1, (int8x16_t)__p2);
+  __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32(__p0, __p1, __p2);
   return __ret;
 }
 #else
@@ -33682,7 +33682,7 @@
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
   uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32((int8x16_t)__rev0, __p1, (int8x16_t)__rev2);
+  __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32(__rev0, __p1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
@@ -33691,7 +33691,7 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
   uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32((int8x16_t)__p0, __p1, (int8x16_t)__p2);
+  __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32(__p0, __p1, __p2);
   return __ret;
 }
 #else
@@ -33699,7 +33699,7 @@
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
   uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32((int8x16_t)__rev0, __p1, (int8x16_t)__rev2);
+  __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32(__rev0, __p1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
@@ -39200,7 +39200,7 @@
 #define vduph_lane_f16(__p0, __p1) __extension__ ({ \
   float16x4_t __s0 = __p0; \
   float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_lane_f16((int8x8_t)__s0, __p1); \
+  __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -39208,7 +39208,7 @@
   float16x4_t __s0 = __p0; \
   float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_lane_f16((int8x8_t)__rev0, __p1); \
+  __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -39217,7 +39217,7 @@
 #define vduph_laneq_f16(__p0, __p1) __extension__ ({ \
   float16x8_t __s0 = __p0; \
   float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_laneq_f16((int8x16_t)__s0, __p1); \
+  __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -39225,7 +39225,7 @@
   float16x8_t __s0 = __p0; \
   float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_laneq_f16((int8x16_t)__rev0, __p1); \
+  __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -39236,7 +39236,7 @@
   float16_t __s1 = __p1; \
   float16x4_t __s2 = __p2; \
   float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (int8x8_t)__s2, __p3); \
+  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \
   __ret; \
 })
 #else
@@ -39246,7 +39246,7 @@
   float16x4_t __s2 = __p2; \
   float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
   float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (int8x8_t)__rev2, __p3); \
+  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__rev2, __p3); \
   __ret; \
 })
 #define __noswap_vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
@@ -39254,7 +39254,7 @@
   float16_t __s1 = __p1; \
   float16x4_t __s2 = __p2; \
   float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (int8x8_t)__s2, __p3); \
+  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \
   __ret; \
 })
 #endif
@@ -39329,7 +39329,7 @@
   float16_t __s1 = __p1; \
   float16x8_t __s2 = __p2; \
   float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (int8x16_t)__s2, __p3); \
+  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \
   __ret; \
 })
 #else
@@ -39339,7 +39339,7 @@
   float16x8_t __s2 = __p2; \
   float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
   float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (int8x16_t)__rev2, __p3); \
+  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__rev2, __p3); \
   __ret; \
 })
 #define __noswap_vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
@@ -39347,7 +39347,7 @@
   float16_t __s1 = __p1; \
   float16x8_t __s2 = __p2; \
   float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (int8x16_t)__s2, __p3); \
+  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \
   __ret; \
 })
 #endif
@@ -39873,7 +39873,7 @@
   float16_t __s0 = __p0; \
   float16x4_t __s1 = __p1; \
   float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -39882,7 +39882,7 @@
   float16x4_t __s1 = __p1; \
   float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
   float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (int8x8_t)__rev1, __p2); \
+  __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__rev1, __p2); \
   __ret; \
 })
 #endif
@@ -39934,7 +39934,7 @@
   float16_t __s0 = __p0; \
   float16x8_t __s1 = __p1; \
   float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -39943,7 +39943,7 @@
   float16x8_t __s1 = __p1; \
   float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
   float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (int8x16_t)__rev1, __p2); \
+  __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__rev1, __p2); \
   __ret; \
 })
 #endif
@@ -41173,14 +41173,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint16_t vaddlvq_u8(uint8x16_t __p0) {
   uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlvq_u8((int8x16_t)__p0);
+  __ret = (uint16_t) __builtin_neon_vaddlvq_u8(__p0);
   return __ret;
 }
 #else
 __ai uint16_t vaddlvq_u8(uint8x16_t __p0) {
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlvq_u8((int8x16_t)__rev0);
+  __ret = (uint16_t) __builtin_neon_vaddlvq_u8(__rev0);
   return __ret;
 }
 #endif
@@ -41188,14 +41188,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint64_t vaddlvq_u32(uint32x4_t __p0) {
   uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlvq_u32((int8x16_t)__p0);
+  __ret = (uint64_t) __builtin_neon_vaddlvq_u32(__p0);
   return __ret;
 }
 #else
 __ai uint64_t vaddlvq_u32(uint32x4_t __p0) {
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlvq_u32((int8x16_t)__rev0);
+  __ret = (uint64_t) __builtin_neon_vaddlvq_u32(__rev0);
   return __ret;
 }
 #endif
@@ -41203,14 +41203,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint32_t vaddlvq_u16(uint16x8_t __p0) {
   uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlvq_u16((int8x16_t)__p0);
+  __ret = (uint32_t) __builtin_neon_vaddlvq_u16(__p0);
   return __ret;
 }
 #else
 __ai uint32_t vaddlvq_u16(uint16x8_t __p0) {
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlvq_u16((int8x16_t)__rev0);
+  __ret = (uint32_t) __builtin_neon_vaddlvq_u16(__rev0);
   return __ret;
 }
 #endif
@@ -41218,14 +41218,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int16_t vaddlvq_s8(int8x16_t __p0) {
   int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlvq_s8((int8x16_t)__p0);
+  __ret = (int16_t) __builtin_neon_vaddlvq_s8(__p0);
   return __ret;
 }
 #else
 __ai int16_t vaddlvq_s8(int8x16_t __p0) {
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlvq_s8((int8x16_t)__rev0);
+  __ret = (int16_t) __builtin_neon_vaddlvq_s8(__rev0);
   return __ret;
 }
 #endif
@@ -41233,14 +41233,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int64_t vaddlvq_s32(int32x4_t __p0) {
   int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlvq_s32((int8x16_t)__p0);
+  __ret = (int64_t) __builtin_neon_vaddlvq_s32(__p0);
   return __ret;
 }
 #else
 __ai int64_t vaddlvq_s32(int32x4_t __p0) {
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlvq_s32((int8x16_t)__rev0);
+  __ret = (int64_t) __builtin_neon_vaddlvq_s32(__rev0);
   return __ret;
 }
 #endif
@@ -41248,14 +41248,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32_t vaddlvq_s16(int16x8_t __p0) {
   int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlvq_s16((int8x16_t)__p0);
+  __ret = (int32_t) __builtin_neon_vaddlvq_s16(__p0);
   return __ret;
 }
 #else
 __ai int32_t vaddlvq_s16(int16x8_t __p0) {
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlvq_s16((int8x16_t)__rev0);
+  __ret = (int32_t) __builtin_neon_vaddlvq_s16(__rev0);
   return __ret;
 }
 #endif
@@ -41263,14 +41263,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint16_t vaddlv_u8(uint8x8_t __p0) {
   uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlv_u8((int8x8_t)__p0);
+  __ret = (uint16_t) __builtin_neon_vaddlv_u8(__p0);
   return __ret;
 }
 #else
 __ai uint16_t vaddlv_u8(uint8x8_t __p0) {
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlv_u8((int8x8_t)__rev0);
+  __ret = (uint16_t) __builtin_neon_vaddlv_u8(__rev0);
   return __ret;
 }
 #endif
@@ -41278,14 +41278,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint64_t vaddlv_u32(uint32x2_t __p0) {
   uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlv_u32((int8x8_t)__p0);
+  __ret = (uint64_t) __builtin_neon_vaddlv_u32(__p0);
   return __ret;
 }
 #else
 __ai uint64_t vaddlv_u32(uint32x2_t __p0) {
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlv_u32((int8x8_t)__rev0);
+  __ret = (uint64_t) __builtin_neon_vaddlv_u32(__rev0);
   return __ret;
 }
 #endif
@@ -41293,14 +41293,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint32_t vaddlv_u16(uint16x4_t __p0) {
   uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlv_u16((int8x8_t)__p0);
+  __ret = (uint32_t) __builtin_neon_vaddlv_u16(__p0);
   return __ret;
 }
 #else
 __ai uint32_t vaddlv_u16(uint16x4_t __p0) {
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlv_u16((int8x8_t)__rev0);
+  __ret = (uint32_t) __builtin_neon_vaddlv_u16(__rev0);
   return __ret;
 }
 #endif
@@ -41308,14 +41308,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int16_t vaddlv_s8(int8x8_t __p0) {
   int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlv_s8((int8x8_t)__p0);
+  __ret = (int16_t) __builtin_neon_vaddlv_s8(__p0);
   return __ret;
 }
 #else
 __ai int16_t vaddlv_s8(int8x8_t __p0) {
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlv_s8((int8x8_t)__rev0);
+  __ret = (int16_t) __builtin_neon_vaddlv_s8(__rev0);
   return __ret;
 }
 #endif
@@ -41323,14 +41323,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int64_t vaddlv_s32(int32x2_t __p0) {
   int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlv_s32((int8x8_t)__p0);
+  __ret = (int64_t) __builtin_neon_vaddlv_s32(__p0);
   return __ret;
 }
 #else
 __ai int64_t vaddlv_s32(int32x2_t __p0) {
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlv_s32((int8x8_t)__rev0);
+  __ret = (int64_t) __builtin_neon_vaddlv_s32(__rev0);
   return __ret;
 }
 #endif
@@ -41338,14 +41338,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32_t vaddlv_s16(int16x4_t __p0) {
   int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlv_s16((int8x8_t)__p0);
+  __ret = (int32_t) __builtin_neon_vaddlv_s16(__p0);
   return __ret;
 }
 #else
 __ai int32_t vaddlv_s16(int16x4_t __p0) {
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlv_s16((int8x8_t)__rev0);
+  __ret = (int32_t) __builtin_neon_vaddlv_s16(__rev0);
   return __ret;
 }
 #endif
@@ -41353,14 +41353,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint8_t vaddvq_u8(uint8x16_t __p0) {
   uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddvq_u8((int8x16_t)__p0);
+  __ret = (uint8_t) __builtin_neon_vaddvq_u8(__p0);
   return __ret;
 }
 #else
 __ai uint8_t vaddvq_u8(uint8x16_t __p0) {
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddvq_u8((int8x16_t)__rev0);
+  __ret = (uint8_t) __builtin_neon_vaddvq_u8(__rev0);
   return __ret;
 }
 #endif
@@ -41368,14 +41368,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint32_t vaddvq_u32(uint32x4_t __p0) {
   uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddvq_u32((int8x16_t)__p0);
+  __ret = (uint32_t) __builtin_neon_vaddvq_u32(__p0);
   return __ret;
 }
 #else
 __ai uint32_t vaddvq_u32(uint32x4_t __p0) {
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddvq_u32((int8x16_t)__rev0);
+  __ret = (uint32_t) __builtin_neon_vaddvq_u32(__rev0);
   return __ret;
 }
 #endif
@@ -41383,14 +41383,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint64_t vaddvq_u64(uint64x2_t __p0) {
   uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddvq_u64((int8x16_t)__p0);
+  __ret = (uint64_t) __builtin_neon_vaddvq_u64(__p0);
   return __ret;
 }
 #else
 __ai uint64_t vaddvq_u64(uint64x2_t __p0) {
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddvq_u64((int8x16_t)__rev0);
+  __ret = (uint64_t) __builtin_neon_vaddvq_u64(__rev0);
   return __ret;
 }
 #endif
@@ -41398,14 +41398,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint16_t vaddvq_u16(uint16x8_t __p0) {
   uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddvq_u16((int8x16_t)__p0);
+  __ret = (uint16_t) __builtin_neon_vaddvq_u16(__p0);
   return __ret;
 }
 #else
 __ai uint16_t vaddvq_u16(uint16x8_t __p0) {
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddvq_u16((int8x16_t)__rev0);
+  __ret = (uint16_t) __builtin_neon_vaddvq_u16(__rev0);
   return __ret;
 }
 #endif
@@ -41413,14 +41413,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int8_t vaddvq_s8(int8x16_t __p0) {
   int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddvq_s8((int8x16_t)__p0);
+  __ret = (int8_t) __builtin_neon_vaddvq_s8(__p0);
   return __ret;
 }
 #else
 __ai int8_t vaddvq_s8(int8x16_t __p0) {
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddvq_s8((int8x16_t)__rev0);
+  __ret = (int8_t) __builtin_neon_vaddvq_s8(__rev0);
   return __ret;
 }
 #endif
@@ -41428,14 +41428,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float64_t vaddvq_f64(float64x2_t __p0) {
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vaddvq_f64((int8x16_t)__p0);
+  __ret = (float64_t) __builtin_neon_vaddvq_f64(__p0);
   return __ret;
 }
 #else
 __ai float64_t vaddvq_f64(float64x2_t __p0) {
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vaddvq_f64((int8x16_t)__rev0);
+  __ret = (float64_t) __builtin_neon_vaddvq_f64(__rev0);
   return __ret;
 }
 #endif
@@ -41443,14 +41443,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float32_t vaddvq_f32(float32x4_t __p0) {
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddvq_f32((int8x16_t)__p0);
+  __ret = (float32_t) __builtin_neon_vaddvq_f32(__p0);
   return __ret;
 }
 #else
 __ai float32_t vaddvq_f32(float32x4_t __p0) {
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddvq_f32((int8x16_t)__rev0);
+  __ret = (float32_t) __builtin_neon_vaddvq_f32(__rev0);
   return __ret;
 }
 #endif
@@ -41458,14 +41458,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32_t vaddvq_s32(int32x4_t __p0) {
   int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddvq_s32((int8x16_t)__p0);
+  __ret = (int32_t) __builtin_neon_vaddvq_s32(__p0);
   return __ret;
 }
 #else
 __ai int32_t vaddvq_s32(int32x4_t __p0) {
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddvq_s32((int8x16_t)__rev0);
+  __ret = (int32_t) __builtin_neon_vaddvq_s32(__rev0);
   return __ret;
 }
 #endif
@@ -41473,14 +41473,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int64_t vaddvq_s64(int64x2_t __p0) {
   int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddvq_s64((int8x16_t)__p0);
+  __ret = (int64_t) __builtin_neon_vaddvq_s64(__p0);
   return __ret;
 }
 #else
 __ai int64_t vaddvq_s64(int64x2_t __p0) {
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddvq_s64((int8x16_t)__rev0);
+  __ret = (int64_t) __builtin_neon_vaddvq_s64(__rev0);
   return __ret;
 }
 #endif
@@ -41488,14 +41488,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int16_t vaddvq_s16(int16x8_t __p0) {
   int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddvq_s16((int8x16_t)__p0);
+  __ret = (int16_t) __builtin_neon_vaddvq_s16(__p0);
   return __ret;
 }
 #else
 __ai int16_t vaddvq_s16(int16x8_t __p0) {
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddvq_s16((int8x16_t)__rev0);
+  __ret = (int16_t) __builtin_neon_vaddvq_s16(__rev0);
   return __ret;
 }
 #endif
@@ -41503,14 +41503,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint8_t vaddv_u8(uint8x8_t __p0) {
   uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddv_u8((int8x8_t)__p0);
+  __ret = (uint8_t) __builtin_neon_vaddv_u8(__p0);
   return __ret;
 }
 #else
 __ai uint8_t vaddv_u8(uint8x8_t __p0) {
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddv_u8((int8x8_t)__rev0);
+  __ret = (uint8_t) __builtin_neon_vaddv_u8(__rev0);
   return __ret;
 }
 #endif
@@ -41518,14 +41518,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint32_t vaddv_u32(uint32x2_t __p0) {
   uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddv_u32((int8x8_t)__p0);
+  __ret = (uint32_t) __builtin_neon_vaddv_u32(__p0);
   return __ret;
 }
 #else
 __ai uint32_t vaddv_u32(uint32x2_t __p0) {
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddv_u32((int8x8_t)__rev0);
+  __ret = (uint32_t) __builtin_neon_vaddv_u32(__rev0);
   return __ret;
 }
 #endif
@@ -41533,14 +41533,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint16_t vaddv_u16(uint16x4_t __p0) {
   uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddv_u16((int8x8_t)__p0);
+  __ret = (uint16_t) __builtin_neon_vaddv_u16(__p0);
   return __ret;
 }
 #else
 __ai uint16_t vaddv_u16(uint16x4_t __p0) {
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddv_u16((int8x8_t)__rev0);
+  __ret = (uint16_t) __builtin_neon_vaddv_u16(__rev0);
   return __ret;
 }
 #endif
@@ -41548,14 +41548,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int8_t vaddv_s8(int8x8_t __p0) {
   int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddv_s8((int8x8_t)__p0);
+  __ret = (int8_t) __builtin_neon_vaddv_s8(__p0);
   return __ret;
 }
 #else
 __ai int8_t vaddv_s8(int8x8_t __p0) {
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddv_s8((int8x8_t)__rev0);
+  __ret = (int8_t) __builtin_neon_vaddv_s8(__rev0);
   return __ret;
 }
 #endif
@@ -41563,14 +41563,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float32_t vaddv_f32(float32x2_t __p0) {
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddv_f32((int8x8_t)__p0);
+  __ret = (float32_t) __builtin_neon_vaddv_f32(__p0);
   return __ret;
 }
 #else
 __ai float32_t vaddv_f32(float32x2_t __p0) {
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddv_f32((int8x8_t)__rev0);
+  __ret = (float32_t) __builtin_neon_vaddv_f32(__rev0);
   return __ret;
 }
 #endif
@@ -41578,14 +41578,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32_t vaddv_s32(int32x2_t __p0) {
   int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddv_s32((int8x8_t)__p0);
+  __ret = (int32_t) __builtin_neon_vaddv_s32(__p0);
   return __ret;
 }
 #else
 __ai int32_t vaddv_s32(int32x2_t __p0) {
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddv_s32((int8x8_t)__rev0);
+  __ret = (int32_t) __builtin_neon_vaddv_s32(__rev0);
   return __ret;
 }
 #endif
@@ -41593,14 +41593,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int16_t vaddv_s16(int16x4_t __p0) {
   int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddv_s16((int8x8_t)__p0);
+  __ret = (int16_t) __builtin_neon_vaddv_s16(__p0);
   return __ret;
 }
 #else
 __ai int16_t vaddv_s16(int16x4_t __p0) {
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddv_s16((int8x8_t)__rev0);
+  __ret = (int16_t) __builtin_neon_vaddv_s16(__rev0);
   return __ret;
 }
 #endif
@@ -44852,7 +44852,7 @@
 #define vdupb_lane_p8(__p0, __p1) __extension__ ({ \
   poly8x8_t __s0 = __p0; \
   poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
+  __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((poly8x8_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -44860,7 +44860,7 @@
   poly8x8_t __s0 = __p0; \
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
+  __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((poly8x8_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -44869,7 +44869,7 @@
 #define vduph_lane_p16(__p0, __p1) __extension__ ({ \
   poly16x4_t __s0 = __p0; \
   poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__s0, __p1); \
+  __ret = (poly16_t) __builtin_neon_vduph_lane_i16((poly16x4_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -44877,7 +44877,7 @@
   poly16x4_t __s0 = __p0; \
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__rev0, __p1); \
+  __ret = (poly16_t) __builtin_neon_vduph_lane_i16((poly16x4_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -44903,7 +44903,7 @@
 #define vdups_lane_u32(__p0, __p1) __extension__ ({ \
   uint32x2_t __s0 = __p0; \
   uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__s0, __p1); \
+  __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -44911,7 +44911,7 @@
   uint32x2_t __s0 = __p0; \
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__rev0, __p1); \
+  __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -44919,14 +44919,14 @@
 #define vdupd_lane_u64(__p0, __p1) __extension__ ({ \
   uint64x1_t __s0 = __p0; \
   uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vdupd_lane_i64((int8x8_t)__s0, __p1); \
+  __ret = (uint64_t) __builtin_neon_vdupd_lane_i64((int64x1_t)__s0, __p1); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vduph_lane_u16(__p0, __p1) __extension__ ({ \
   uint16x4_t __s0 = __p0; \
   uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__s0, __p1); \
+  __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -44934,7 +44934,7 @@
   uint16x4_t __s0 = __p0; \
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__rev0, __p1); \
+  __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -44959,14 +44959,14 @@
 #define vdupd_lane_f64(__p0, __p1) __extension__ ({ \
   float64x1_t __s0 = __p0; \
   float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vdupd_lane_f64((int8x8_t)__s0, __p1); \
+  __ret = (float64_t) __builtin_neon_vdupd_lane_f64((float64x1_t)__s0, __p1); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vdups_lane_f32(__p0, __p1) __extension__ ({ \
   float32x2_t __s0 = __p0; \
   float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_lane_f32((int8x8_t)__s0, __p1); \
+  __ret = (float32_t) __builtin_neon_vdups_lane_f32((float32x2_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -44974,7 +44974,7 @@
   float32x2_t __s0 = __p0; \
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_lane_f32((int8x8_t)__rev0, __p1); \
+  __ret = (float32_t) __builtin_neon_vdups_lane_f32((float32x2_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -44983,7 +44983,7 @@
 #define vdups_lane_s32(__p0, __p1) __extension__ ({ \
   int32x2_t __s0 = __p0; \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__s0, __p1); \
+  __ret = (int32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -44991,7 +44991,7 @@
   int32x2_t __s0 = __p0; \
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__rev0, __p1); \
+  __ret = (int32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -44999,14 +44999,14 @@
 #define vdupd_lane_s64(__p0, __p1) __extension__ ({ \
   int64x1_t __s0 = __p0; \
   int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vdupd_lane_i64((int8x8_t)__s0, __p1); \
+  __ret = (int64_t) __builtin_neon_vdupd_lane_i64((int64x1_t)__s0, __p1); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vduph_lane_s16(__p0, __p1) __extension__ ({ \
   int16x4_t __s0 = __p0; \
   int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__s0, __p1); \
+  __ret = (int16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -45014,7 +45014,7 @@
   int16x4_t __s0 = __p0; \
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__rev0, __p1); \
+  __ret = (int16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -45105,7 +45105,7 @@
 #define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \
   poly8x16_t __s0 = __p0; \
   poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
+  __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((poly8x16_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -45113,7 +45113,7 @@
   poly8x16_t __s0 = __p0; \
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
+  __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((poly8x16_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -45122,7 +45122,7 @@
 #define vduph_laneq_p16(__p0, __p1) __extension__ ({ \
   poly16x8_t __s0 = __p0; \
   poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__s0, __p1); \
+  __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((poly16x8_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -45130,7 +45130,7 @@
   poly16x8_t __s0 = __p0; \
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__rev0, __p1); \
+  __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((poly16x8_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -45156,7 +45156,7 @@
 #define vdups_laneq_u32(__p0, __p1) __extension__ ({ \
   uint32x4_t __s0 = __p0; \
   uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__s0, __p1); \
+  __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -45164,7 +45164,7 @@
   uint32x4_t __s0 = __p0; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__rev0, __p1); \
+  __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -45173,7 +45173,7 @@
 #define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \
   uint64x2_t __s0 = __p0; \
   uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__s0, __p1); \
+  __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -45181,7 +45181,7 @@
   uint64x2_t __s0 = __p0; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__rev0, __p1); \
+  __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -45190,7 +45190,7 @@
 #define vduph_laneq_u16(__p0, __p1) __extension__ ({ \
   uint16x8_t __s0 = __p0; \
   uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__s0, __p1); \
+  __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -45198,7 +45198,7 @@
   uint16x8_t __s0 = __p0; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__rev0, __p1); \
+  __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -45224,7 +45224,7 @@
 #define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \
   float64x2_t __s0 = __p0; \
   float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((int8x16_t)__s0, __p1); \
+  __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((float64x2_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -45232,7 +45232,7 @@
   float64x2_t __s0 = __p0; \
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((int8x16_t)__rev0, __p1); \
+  __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((float64x2_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -45241,7 +45241,7 @@
 #define vdups_laneq_f32(__p0, __p1) __extension__ ({ \
   float32x4_t __s0 = __p0; \
   float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_laneq_f32((int8x16_t)__s0, __p1); \
+  __ret = (float32_t) __builtin_neon_vdups_laneq_f32((float32x4_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -45249,7 +45249,7 @@
   float32x4_t __s0 = __p0; \
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_laneq_f32((int8x16_t)__rev0, __p1); \
+  __ret = (float32_t) __builtin_neon_vdups_laneq_f32((float32x4_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -45258,7 +45258,7 @@
 #define vdups_laneq_s32(__p0, __p1) __extension__ ({ \
   int32x4_t __s0 = __p0; \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__s0, __p1); \
+  __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -45266,7 +45266,7 @@
   int32x4_t __s0 = __p0; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__rev0, __p1); \
+  __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -45275,7 +45275,7 @@
 #define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \
   int64x2_t __s0 = __p0; \
   int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__s0, __p1); \
+  __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -45283,7 +45283,7 @@
   int64x2_t __s0 = __p0; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__rev0, __p1); \
+  __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -45292,7 +45292,7 @@
 #define vduph_laneq_s16(__p0, __p1) __extension__ ({ \
   int16x8_t __s0 = __p0; \
   int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__s0, __p1); \
+  __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -45300,7 +45300,7 @@
   int16x8_t __s0 = __p0; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__rev0, __p1); \
+  __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -45934,7 +45934,7 @@
   float64_t __s1 = __p1; \
   float64x1_t __s2 = __p2; \
   float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (int8x8_t)__s2, __p3); \
+  __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (float64x1_t)__s2, __p3); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
@@ -45943,7 +45943,7 @@
   float32_t __s1 = __p1; \
   float32x2_t __s2 = __p2; \
   float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (int8x8_t)__s2, __p3); \
+  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__s2, __p3); \
   __ret; \
 })
 #else
@@ -45953,7 +45953,7 @@
   float32x2_t __s2 = __p2; \
   float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
   float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (int8x8_t)__rev2, __p3); \
+  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__rev2, __p3); \
   __ret; \
 })
 #define __noswap_vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
@@ -45961,7 +45961,7 @@
   float32_t __s1 = __p1; \
   float32x2_t __s2 = __p2; \
   float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (int8x8_t)__s2, __p3); \
+  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__s2, __p3); \
   __ret; \
 })
 #endif
@@ -46075,7 +46075,7 @@
   float64_t __s1 = __p1; \
   float64x2_t __s2 = __p2; \
   float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (int8x16_t)__s2, __p3); \
+  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__s2, __p3); \
   __ret; \
 })
 #else
@@ -46085,7 +46085,7 @@
   float64x2_t __s2 = __p2; \
   float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
   float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (int8x16_t)__rev2, __p3); \
+  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__rev2, __p3); \
   __ret; \
 })
 #define __noswap_vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
@@ -46093,7 +46093,7 @@
   float64_t __s1 = __p1; \
   float64x2_t __s2 = __p2; \
   float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (int8x16_t)__s2, __p3); \
+  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__s2, __p3); \
   __ret; \
 })
 #endif
@@ -46104,7 +46104,7 @@
   float32_t __s1 = __p1; \
   float32x4_t __s2 = __p2; \
   float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (int8x16_t)__s2, __p3); \
+  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__s2, __p3); \
   __ret; \
 })
 #else
@@ -46114,7 +46114,7 @@
   float32x4_t __s2 = __p2; \
   float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
   float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (int8x16_t)__rev2, __p3); \
+  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__rev2, __p3); \
   __ret; \
 })
 #define __noswap_vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
@@ -46122,7 +46122,7 @@
   float32_t __s1 = __p1; \
   float32x4_t __s2 = __p2; \
   float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (int8x16_t)__s2, __p3); \
+  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__s2, __p3); \
   __ret; \
 })
 #endif
@@ -46634,14 +46634,14 @@
 #define vget_lane_p64(__p0, __p1) __extension__ ({ \
   poly64x1_t __s0 = __p0; \
   poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
+  __ret = (poly64_t) __builtin_neon_vget_lane_i64((poly64x1_t)__s0, __p1); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vgetq_lane_p64(__p0, __p1) __extension__ ({ \
   poly64x2_t __s0 = __p0; \
   poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
+  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -46649,13 +46649,13 @@
   poly64x2_t __s0 = __p0; \
   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__rev0, __p1); \
+  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_p64(__p0, __p1) __extension__ ({ \
   poly64x2_t __s0 = __p0; \
   poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
+  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -46664,7 +46664,7 @@
 #define vgetq_lane_f64(__p0, __p1) __extension__ ({ \
   float64x2_t __s0 = __p0; \
   float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((int8x16_t)__s0, __p1); \
+  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -46672,13 +46672,13 @@
   float64x2_t __s0 = __p0; \
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((int8x16_t)__rev0, __p1); \
+  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_f64(__p0, __p1) __extension__ ({ \
   float64x2_t __s0 = __p0; \
   float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((int8x16_t)__s0, __p1); \
+  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -46686,7 +46686,7 @@
 #define vget_lane_f64(__p0, __p1) __extension__ ({ \
   float64x1_t __s0 = __p0; \
   float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vget_lane_f64((int8x8_t)__s0, __p1); \
+  __ret = (float64_t) __builtin_neon_vget_lane_f64((float64x1_t)__s0, __p1); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
@@ -47227,7 +47227,7 @@
 #define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
   float64x2x2_t __s1 = __p1; \
   float64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 42); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 42); \
   __ret; \
 })
 #else
@@ -47237,7 +47237,7 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   float64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 42); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 42); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
@@ -47249,7 +47249,7 @@
 #define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
   int64x2x2_t __s1 = __p1; \
   int64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 35); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 35); \
   __ret; \
 })
 #else
@@ -47259,7 +47259,7 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   int64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 35); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 35); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
@@ -47276,13 +47276,13 @@
 #define vld2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
   float64x1x2_t __s1 = __p1; \
   float64x1x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 10); \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 10); \
   __ret; \
 })
 #define vld2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
   int64x1x2_t __s1 = __p1; \
   int64x1x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 3); \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 3); \
   __ret; \
 })
 #define vld3_p64(__p0) __extension__ ({ \
@@ -47543,7 +47543,7 @@
 #define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
   float64x2x3_t __s1 = __p1; \
   float64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 42); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 42); \
   __ret; \
 })
 #else
@@ -47554,7 +47554,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   float64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 42); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 42); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
@@ -47567,7 +47567,7 @@
 #define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
   int64x2x3_t __s1 = __p1; \
   int64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 35); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 35); \
   __ret; \
 })
 #else
@@ -47578,7 +47578,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   int64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 35); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 35); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
@@ -47596,13 +47596,13 @@
 #define vld3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
   float64x1x3_t __s1 = __p1; \
   float64x1x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 10); \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 10); \
   __ret; \
 })
 #define vld3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
   int64x1x3_t __s1 = __p1; \
   int64x1x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 3); \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 3); \
   __ret; \
 })
 #define vld4_p64(__p0) __extension__ ({ \
@@ -47879,7 +47879,7 @@
 #define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
   float64x2x4_t __s1 = __p1; \
   float64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 42); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 42); \
   __ret; \
 })
 #else
@@ -47891,7 +47891,7 @@
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
   float64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 42); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 42); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
@@ -47905,7 +47905,7 @@
 #define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
   int64x2x4_t __s1 = __p1; \
   int64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 35); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 35); \
   __ret; \
 })
 #else
@@ -47917,7 +47917,7 @@
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
   int64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 35); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 35); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
@@ -47936,13 +47936,13 @@
 #define vld4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
   float64x1x4_t __s1 = __p1; \
   float64x1x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 10); \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 10); \
   __ret; \
 })
 #define vld4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
   int64x1x4_t __s1 = __p1; \
   int64x1x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 3); \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 3); \
   __ret; \
 })
 #define vldrq_p128(__p0) __extension__ ({ \
@@ -47975,14 +47975,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float64_t vmaxnmvq_f64(float64x2_t __p0) {
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxnmvq_f64((int8x16_t)__p0);
+  __ret = (float64_t) __builtin_neon_vmaxnmvq_f64(__p0);
   return __ret;
 }
 #else
 __ai float64_t vmaxnmvq_f64(float64x2_t __p0) {
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxnmvq_f64((int8x16_t)__rev0);
+  __ret = (float64_t) __builtin_neon_vmaxnmvq_f64(__rev0);
   return __ret;
 }
 #endif
@@ -47990,14 +47990,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float32_t vmaxnmvq_f32(float32x4_t __p0) {
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmvq_f32((int8x16_t)__p0);
+  __ret = (float32_t) __builtin_neon_vmaxnmvq_f32(__p0);
   return __ret;
 }
 #else
 __ai float32_t vmaxnmvq_f32(float32x4_t __p0) {
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmvq_f32((int8x16_t)__rev0);
+  __ret = (float32_t) __builtin_neon_vmaxnmvq_f32(__rev0);
   return __ret;
 }
 #endif
@@ -48005,14 +48005,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float32_t vmaxnmv_f32(float32x2_t __p0) {
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmv_f32((int8x8_t)__p0);
+  __ret = (float32_t) __builtin_neon_vmaxnmv_f32(__p0);
   return __ret;
 }
 #else
 __ai float32_t vmaxnmv_f32(float32x2_t __p0) {
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmv_f32((int8x8_t)__rev0);
+  __ret = (float32_t) __builtin_neon_vmaxnmv_f32(__rev0);
   return __ret;
 }
 #endif
@@ -48020,14 +48020,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint8_t vmaxvq_u8(uint8x16_t __p0) {
   uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxvq_u8((int8x16_t)__p0);
+  __ret = (uint8_t) __builtin_neon_vmaxvq_u8(__p0);
   return __ret;
 }
 #else
 __ai uint8_t vmaxvq_u8(uint8x16_t __p0) {
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxvq_u8((int8x16_t)__rev0);
+  __ret = (uint8_t) __builtin_neon_vmaxvq_u8(__rev0);
   return __ret;
 }
 #endif
@@ -48035,14 +48035,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint32_t vmaxvq_u32(uint32x4_t __p0) {
   uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxvq_u32((int8x16_t)__p0);
+  __ret = (uint32_t) __builtin_neon_vmaxvq_u32(__p0);
   return __ret;
 }
 #else
 __ai uint32_t vmaxvq_u32(uint32x4_t __p0) {
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxvq_u32((int8x16_t)__rev0);
+  __ret = (uint32_t) __builtin_neon_vmaxvq_u32(__rev0);
   return __ret;
 }
 #endif
@@ -48050,14 +48050,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint16_t vmaxvq_u16(uint16x8_t __p0) {
   uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxvq_u16((int8x16_t)__p0);
+  __ret = (uint16_t) __builtin_neon_vmaxvq_u16(__p0);
   return __ret;
 }
 #else
 __ai uint16_t vmaxvq_u16(uint16x8_t __p0) {
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxvq_u16((int8x16_t)__rev0);
+  __ret = (uint16_t) __builtin_neon_vmaxvq_u16(__rev0);
   return __ret;
 }
 #endif
@@ -48065,14 +48065,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int8_t vmaxvq_s8(int8x16_t __p0) {
   int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxvq_s8((int8x16_t)__p0);
+  __ret = (int8_t) __builtin_neon_vmaxvq_s8(__p0);
   return __ret;
 }
 #else
 __ai int8_t vmaxvq_s8(int8x16_t __p0) {
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxvq_s8((int8x16_t)__rev0);
+  __ret = (int8_t) __builtin_neon_vmaxvq_s8(__rev0);
   return __ret;
 }
 #endif
@@ -48080,14 +48080,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float64_t vmaxvq_f64(float64x2_t __p0) {
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxvq_f64((int8x16_t)__p0);
+  __ret = (float64_t) __builtin_neon_vmaxvq_f64(__p0);
   return __ret;
 }
 #else
 __ai float64_t vmaxvq_f64(float64x2_t __p0) {
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxvq_f64((int8x16_t)__rev0);
+  __ret = (float64_t) __builtin_neon_vmaxvq_f64(__rev0);
   return __ret;
 }
 #endif
@@ -48095,14 +48095,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float32_t vmaxvq_f32(float32x4_t __p0) {
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxvq_f32((int8x16_t)__p0);
+  __ret = (float32_t) __builtin_neon_vmaxvq_f32(__p0);
   return __ret;
 }
 #else
 __ai float32_t vmaxvq_f32(float32x4_t __p0) {
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxvq_f32((int8x16_t)__rev0);
+  __ret = (float32_t) __builtin_neon_vmaxvq_f32(__rev0);
   return __ret;
 }
 #endif
@@ -48110,14 +48110,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32_t vmaxvq_s32(int32x4_t __p0) {
   int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxvq_s32((int8x16_t)__p0);
+  __ret = (int32_t) __builtin_neon_vmaxvq_s32(__p0);
   return __ret;
 }
 #else
 __ai int32_t vmaxvq_s32(int32x4_t __p0) {
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxvq_s32((int8x16_t)__rev0);
+  __ret = (int32_t) __builtin_neon_vmaxvq_s32(__rev0);
   return __ret;
 }
 #endif
@@ -48125,14 +48125,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int16_t vmaxvq_s16(int16x8_t __p0) {
   int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxvq_s16((int8x16_t)__p0);
+  __ret = (int16_t) __builtin_neon_vmaxvq_s16(__p0);
   return __ret;
 }
 #else
 __ai int16_t vmaxvq_s16(int16x8_t __p0) {
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxvq_s16((int8x16_t)__rev0);
+  __ret = (int16_t) __builtin_neon_vmaxvq_s16(__rev0);
   return __ret;
 }
 #endif
@@ -48140,14 +48140,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint8_t vmaxv_u8(uint8x8_t __p0) {
   uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxv_u8((int8x8_t)__p0);
+  __ret = (uint8_t) __builtin_neon_vmaxv_u8(__p0);
   return __ret;
 }
 #else
 __ai uint8_t vmaxv_u8(uint8x8_t __p0) {
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxv_u8((int8x8_t)__rev0);
+  __ret = (uint8_t) __builtin_neon_vmaxv_u8(__rev0);
   return __ret;
 }
 #endif
@@ -48155,14 +48155,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint32_t vmaxv_u32(uint32x2_t __p0) {
   uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxv_u32((int8x8_t)__p0);
+  __ret = (uint32_t) __builtin_neon_vmaxv_u32(__p0);
   return __ret;
 }
 #else
 __ai uint32_t vmaxv_u32(uint32x2_t __p0) {
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxv_u32((int8x8_t)__rev0);
+  __ret = (uint32_t) __builtin_neon_vmaxv_u32(__rev0);
   return __ret;
 }
 #endif
@@ -48170,14 +48170,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint16_t vmaxv_u16(uint16x4_t __p0) {
   uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxv_u16((int8x8_t)__p0);
+  __ret = (uint16_t) __builtin_neon_vmaxv_u16(__p0);
   return __ret;
 }
 #else
 __ai uint16_t vmaxv_u16(uint16x4_t __p0) {
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxv_u16((int8x8_t)__rev0);
+  __ret = (uint16_t) __builtin_neon_vmaxv_u16(__rev0);
   return __ret;
 }
 #endif
@@ -48185,14 +48185,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int8_t vmaxv_s8(int8x8_t __p0) {
   int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxv_s8((int8x8_t)__p0);
+  __ret = (int8_t) __builtin_neon_vmaxv_s8(__p0);
   return __ret;
 }
 #else
 __ai int8_t vmaxv_s8(int8x8_t __p0) {
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxv_s8((int8x8_t)__rev0);
+  __ret = (int8_t) __builtin_neon_vmaxv_s8(__rev0);
   return __ret;
 }
 #endif
@@ -48200,14 +48200,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float32_t vmaxv_f32(float32x2_t __p0) {
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxv_f32((int8x8_t)__p0);
+  __ret = (float32_t) __builtin_neon_vmaxv_f32(__p0);
   return __ret;
 }
 #else
 __ai float32_t vmaxv_f32(float32x2_t __p0) {
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxv_f32((int8x8_t)__rev0);
+  __ret = (float32_t) __builtin_neon_vmaxv_f32(__rev0);
   return __ret;
 }
 #endif
@@ -48215,14 +48215,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32_t vmaxv_s32(int32x2_t __p0) {
   int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxv_s32((int8x8_t)__p0);
+  __ret = (int32_t) __builtin_neon_vmaxv_s32(__p0);
   return __ret;
 }
 #else
 __ai int32_t vmaxv_s32(int32x2_t __p0) {
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxv_s32((int8x8_t)__rev0);
+  __ret = (int32_t) __builtin_neon_vmaxv_s32(__rev0);
   return __ret;
 }
 #endif
@@ -48230,14 +48230,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int16_t vmaxv_s16(int16x4_t __p0) {
   int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxv_s16((int8x8_t)__p0);
+  __ret = (int16_t) __builtin_neon_vmaxv_s16(__p0);
   return __ret;
 }
 #else
 __ai int16_t vmaxv_s16(int16x4_t __p0) {
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxv_s16((int8x8_t)__rev0);
+  __ret = (int16_t) __builtin_neon_vmaxv_s16(__rev0);
   return __ret;
 }
 #endif
@@ -48267,14 +48267,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float64_t vminnmvq_f64(float64x2_t __p0) {
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminnmvq_f64((int8x16_t)__p0);
+  __ret = (float64_t) __builtin_neon_vminnmvq_f64(__p0);
   return __ret;
 }
 #else
 __ai float64_t vminnmvq_f64(float64x2_t __p0) {
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminnmvq_f64((int8x16_t)__rev0);
+  __ret = (float64_t) __builtin_neon_vminnmvq_f64(__rev0);
   return __ret;
 }
 #endif
@@ -48282,14 +48282,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float32_t vminnmvq_f32(float32x4_t __p0) {
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmvq_f32((int8x16_t)__p0);
+  __ret = (float32_t) __builtin_neon_vminnmvq_f32(__p0);
   return __ret;
 }
 #else
 __ai float32_t vminnmvq_f32(float32x4_t __p0) {
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmvq_f32((int8x16_t)__rev0);
+  __ret = (float32_t) __builtin_neon_vminnmvq_f32(__rev0);
   return __ret;
 }
 #endif
@@ -48297,14 +48297,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float32_t vminnmv_f32(float32x2_t __p0) {
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmv_f32((int8x8_t)__p0);
+  __ret = (float32_t) __builtin_neon_vminnmv_f32(__p0);
   return __ret;
 }
 #else
 __ai float32_t vminnmv_f32(float32x2_t __p0) {
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmv_f32((int8x8_t)__rev0);
+  __ret = (float32_t) __builtin_neon_vminnmv_f32(__rev0);
   return __ret;
 }
 #endif
@@ -48312,14 +48312,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint8_t vminvq_u8(uint8x16_t __p0) {
   uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminvq_u8((int8x16_t)__p0);
+  __ret = (uint8_t) __builtin_neon_vminvq_u8(__p0);
   return __ret;
 }
 #else
 __ai uint8_t vminvq_u8(uint8x16_t __p0) {
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminvq_u8((int8x16_t)__rev0);
+  __ret = (uint8_t) __builtin_neon_vminvq_u8(__rev0);
   return __ret;
 }
 #endif
@@ -48327,14 +48327,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint32_t vminvq_u32(uint32x4_t __p0) {
   uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminvq_u32((int8x16_t)__p0);
+  __ret = (uint32_t) __builtin_neon_vminvq_u32(__p0);
   return __ret;
 }
 #else
 __ai uint32_t vminvq_u32(uint32x4_t __p0) {
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminvq_u32((int8x16_t)__rev0);
+  __ret = (uint32_t) __builtin_neon_vminvq_u32(__rev0);
   return __ret;
 }
 #endif
@@ -48342,14 +48342,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint16_t vminvq_u16(uint16x8_t __p0) {
   uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminvq_u16((int8x16_t)__p0);
+  __ret = (uint16_t) __builtin_neon_vminvq_u16(__p0);
   return __ret;
 }
 #else
 __ai uint16_t vminvq_u16(uint16x8_t __p0) {
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminvq_u16((int8x16_t)__rev0);
+  __ret = (uint16_t) __builtin_neon_vminvq_u16(__rev0);
   return __ret;
 }
 #endif
@@ -48357,14 +48357,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int8_t vminvq_s8(int8x16_t __p0) {
   int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminvq_s8((int8x16_t)__p0);
+  __ret = (int8_t) __builtin_neon_vminvq_s8(__p0);
   return __ret;
 }
 #else
 __ai int8_t vminvq_s8(int8x16_t __p0) {
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminvq_s8((int8x16_t)__rev0);
+  __ret = (int8_t) __builtin_neon_vminvq_s8(__rev0);
   return __ret;
 }
 #endif
@@ -48372,14 +48372,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float64_t vminvq_f64(float64x2_t __p0) {
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminvq_f64((int8x16_t)__p0);
+  __ret = (float64_t) __builtin_neon_vminvq_f64(__p0);
   return __ret;
 }
 #else
 __ai float64_t vminvq_f64(float64x2_t __p0) {
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminvq_f64((int8x16_t)__rev0);
+  __ret = (float64_t) __builtin_neon_vminvq_f64(__rev0);
   return __ret;
 }
 #endif
@@ -48387,14 +48387,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float32_t vminvq_f32(float32x4_t __p0) {
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminvq_f32((int8x16_t)__p0);
+  __ret = (float32_t) __builtin_neon_vminvq_f32(__p0);
   return __ret;
 }
 #else
 __ai float32_t vminvq_f32(float32x4_t __p0) {
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminvq_f32((int8x16_t)__rev0);
+  __ret = (float32_t) __builtin_neon_vminvq_f32(__rev0);
   return __ret;
 }
 #endif
@@ -48402,14 +48402,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32_t vminvq_s32(int32x4_t __p0) {
   int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminvq_s32((int8x16_t)__p0);
+  __ret = (int32_t) __builtin_neon_vminvq_s32(__p0);
   return __ret;
 }
 #else
 __ai int32_t vminvq_s32(int32x4_t __p0) {
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminvq_s32((int8x16_t)__rev0);
+  __ret = (int32_t) __builtin_neon_vminvq_s32(__rev0);
   return __ret;
 }
 #endif
@@ -48417,14 +48417,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int16_t vminvq_s16(int16x8_t __p0) {
   int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminvq_s16((int8x16_t)__p0);
+  __ret = (int16_t) __builtin_neon_vminvq_s16(__p0);
   return __ret;
 }
 #else
 __ai int16_t vminvq_s16(int16x8_t __p0) {
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminvq_s16((int8x16_t)__rev0);
+  __ret = (int16_t) __builtin_neon_vminvq_s16(__rev0);
   return __ret;
 }
 #endif
@@ -48432,14 +48432,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint8_t vminv_u8(uint8x8_t __p0) {
   uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminv_u8((int8x8_t)__p0);
+  __ret = (uint8_t) __builtin_neon_vminv_u8(__p0);
   return __ret;
 }
 #else
 __ai uint8_t vminv_u8(uint8x8_t __p0) {
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminv_u8((int8x8_t)__rev0);
+  __ret = (uint8_t) __builtin_neon_vminv_u8(__rev0);
   return __ret;
 }
 #endif
@@ -48447,14 +48447,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint32_t vminv_u32(uint32x2_t __p0) {
   uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminv_u32((int8x8_t)__p0);
+  __ret = (uint32_t) __builtin_neon_vminv_u32(__p0);
   return __ret;
 }
 #else
 __ai uint32_t vminv_u32(uint32x2_t __p0) {
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminv_u32((int8x8_t)__rev0);
+  __ret = (uint32_t) __builtin_neon_vminv_u32(__rev0);
   return __ret;
 }
 #endif
@@ -48462,14 +48462,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint16_t vminv_u16(uint16x4_t __p0) {
   uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminv_u16((int8x8_t)__p0);
+  __ret = (uint16_t) __builtin_neon_vminv_u16(__p0);
   return __ret;
 }
 #else
 __ai uint16_t vminv_u16(uint16x4_t __p0) {
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminv_u16((int8x8_t)__rev0);
+  __ret = (uint16_t) __builtin_neon_vminv_u16(__rev0);
   return __ret;
 }
 #endif
@@ -48477,14 +48477,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int8_t vminv_s8(int8x8_t __p0) {
   int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminv_s8((int8x8_t)__p0);
+  __ret = (int8_t) __builtin_neon_vminv_s8(__p0);
   return __ret;
 }
 #else
 __ai int8_t vminv_s8(int8x8_t __p0) {
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminv_s8((int8x8_t)__rev0);
+  __ret = (int8_t) __builtin_neon_vminv_s8(__rev0);
   return __ret;
 }
 #endif
@@ -48492,14 +48492,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float32_t vminv_f32(float32x2_t __p0) {
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminv_f32((int8x8_t)__p0);
+  __ret = (float32_t) __builtin_neon_vminv_f32(__p0);
   return __ret;
 }
 #else
 __ai float32_t vminv_f32(float32x2_t __p0) {
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminv_f32((int8x8_t)__rev0);
+  __ret = (float32_t) __builtin_neon_vminv_f32(__rev0);
   return __ret;
 }
 #endif
@@ -48507,14 +48507,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32_t vminv_s32(int32x2_t __p0) {
   int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminv_s32((int8x8_t)__p0);
+  __ret = (int32_t) __builtin_neon_vminv_s32(__p0);
   return __ret;
 }
 #else
 __ai int32_t vminv_s32(int32x2_t __p0) {
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminv_s32((int8x8_t)__rev0);
+  __ret = (int32_t) __builtin_neon_vminv_s32(__rev0);
   return __ret;
 }
 #endif
@@ -48522,14 +48522,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int16_t vminv_s16(int16x4_t __p0) {
   int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminv_s16((int8x8_t)__p0);
+  __ret = (int16_t) __builtin_neon_vminv_s16(__p0);
   return __ret;
 }
 #else
 __ai int16_t vminv_s16(int16x4_t __p0) {
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminv_s16((int8x8_t)__rev0);
+  __ret = (int16_t) __builtin_neon_vminv_s16(__rev0);
   return __ret;
 }
 #endif
@@ -50321,7 +50321,7 @@
 
 __ai float64x1_t vmul_n_f64(float64x1_t __p0, float64_t __p1) {
   float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmul_n_f64((int8x8_t)__p0, __p1);
+  __ret = (float64x1_t) __builtin_neon_vmul_n_f64((float64x1_t)__p0, __p1);
   return __ret;
 }
 #ifdef __LITTLE_ENDIAN__
@@ -51286,14 +51286,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint64_t vpaddd_u64(uint64x2_t __p0) {
   uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vpaddd_u64((int8x16_t)__p0);
+  __ret = (uint64_t) __builtin_neon_vpaddd_u64(__p0);
   return __ret;
 }
 #else
 __ai uint64_t vpaddd_u64(uint64x2_t __p0) {
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vpaddd_u64((int8x16_t)__rev0);
+  __ret = (uint64_t) __builtin_neon_vpaddd_u64(__rev0);
   return __ret;
 }
 #endif
@@ -51301,14 +51301,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float64_t vpaddd_f64(float64x2_t __p0) {
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpaddd_f64((int8x16_t)__p0);
+  __ret = (float64_t) __builtin_neon_vpaddd_f64(__p0);
   return __ret;
 }
 #else
 __ai float64_t vpaddd_f64(float64x2_t __p0) {
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpaddd_f64((int8x16_t)__rev0);
+  __ret = (float64_t) __builtin_neon_vpaddd_f64(__rev0);
   return __ret;
 }
 #endif
@@ -51316,14 +51316,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int64_t vpaddd_s64(int64x2_t __p0) {
   int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vpaddd_s64((int8x16_t)__p0);
+  __ret = (int64_t) __builtin_neon_vpaddd_s64(__p0);
   return __ret;
 }
 #else
 __ai int64_t vpaddd_s64(int64x2_t __p0) {
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vpaddd_s64((int8x16_t)__rev0);
+  __ret = (int64_t) __builtin_neon_vpaddd_s64(__rev0);
   return __ret;
 }
 #endif
@@ -51331,14 +51331,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float32_t vpadds_f32(float32x2_t __p0) {
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpadds_f32((int8x8_t)__p0);
+  __ret = (float32_t) __builtin_neon_vpadds_f32(__p0);
   return __ret;
 }
 #else
 __ai float32_t vpadds_f32(float32x2_t __p0) {
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpadds_f32((int8x8_t)__rev0);
+  __ret = (float32_t) __builtin_neon_vpadds_f32(__rev0);
   return __ret;
 }
 #endif
@@ -51482,14 +51482,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float64_t vpmaxqd_f64(float64x2_t __p0) {
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxqd_f64((int8x16_t)__p0);
+  __ret = (float64_t) __builtin_neon_vpmaxqd_f64(__p0);
   return __ret;
 }
 #else
 __ai float64_t vpmaxqd_f64(float64x2_t __p0) {
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxqd_f64((int8x16_t)__rev0);
+  __ret = (float64_t) __builtin_neon_vpmaxqd_f64(__rev0);
   return __ret;
 }
 #endif
@@ -51497,14 +51497,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float32_t vpmaxs_f32(float32x2_t __p0) {
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxs_f32((int8x8_t)__p0);
+  __ret = (float32_t) __builtin_neon_vpmaxs_f32(__p0);
   return __ret;
 }
 #else
 __ai float32_t vpmaxs_f32(float32x2_t __p0) {
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxs_f32((int8x8_t)__rev0);
+  __ret = (float32_t) __builtin_neon_vpmaxs_f32(__rev0);
   return __ret;
 }
 #endif
@@ -51563,14 +51563,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float64_t vpmaxnmqd_f64(float64x2_t __p0) {
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64((int8x16_t)__p0);
+  __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64(__p0);
   return __ret;
 }
 #else
 __ai float64_t vpmaxnmqd_f64(float64x2_t __p0) {
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64((int8x16_t)__rev0);
+  __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64(__rev0);
   return __ret;
 }
 #endif
@@ -51578,14 +51578,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float32_t vpmaxnms_f32(float32x2_t __p0) {
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxnms_f32((int8x8_t)__p0);
+  __ret = (float32_t) __builtin_neon_vpmaxnms_f32(__p0);
   return __ret;
 }
 #else
 __ai float32_t vpmaxnms_f32(float32x2_t __p0) {
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxnms_f32((int8x8_t)__rev0);
+  __ret = (float32_t) __builtin_neon_vpmaxnms_f32(__rev0);
   return __ret;
 }
 #endif
@@ -51729,14 +51729,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float64_t vpminqd_f64(float64x2_t __p0) {
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminqd_f64((int8x16_t)__p0);
+  __ret = (float64_t) __builtin_neon_vpminqd_f64(__p0);
   return __ret;
 }
 #else
 __ai float64_t vpminqd_f64(float64x2_t __p0) {
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminqd_f64((int8x16_t)__rev0);
+  __ret = (float64_t) __builtin_neon_vpminqd_f64(__rev0);
   return __ret;
 }
 #endif
@@ -51744,14 +51744,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float32_t vpmins_f32(float32x2_t __p0) {
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmins_f32((int8x8_t)__p0);
+  __ret = (float32_t) __builtin_neon_vpmins_f32(__p0);
   return __ret;
 }
 #else
 __ai float32_t vpmins_f32(float32x2_t __p0) {
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmins_f32((int8x8_t)__rev0);
+  __ret = (float32_t) __builtin_neon_vpmins_f32(__rev0);
   return __ret;
 }
 #endif
@@ -51810,14 +51810,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float64_t vpminnmqd_f64(float64x2_t __p0) {
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminnmqd_f64((int8x16_t)__p0);
+  __ret = (float64_t) __builtin_neon_vpminnmqd_f64(__p0);
   return __ret;
 }
 #else
 __ai float64_t vpminnmqd_f64(float64x2_t __p0) {
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminnmqd_f64((int8x16_t)__rev0);
+  __ret = (float64_t) __builtin_neon_vpminnmqd_f64(__rev0);
   return __ret;
 }
 #endif
@@ -51825,14 +51825,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float32_t vpminnms_f32(float32x2_t __p0) {
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpminnms_f32((int8x8_t)__p0);
+  __ret = (float32_t) __builtin_neon_vpminnms_f32(__p0);
   return __ret;
 }
 #else
 __ai float32_t vpminnms_f32(float32x2_t __p0) {
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpminnms_f32((int8x8_t)__rev0);
+  __ret = (float32_t) __builtin_neon_vpminnms_f32(__rev0);
   return __ret;
 }
 #endif
@@ -52100,7 +52100,7 @@
   int32_t __s1 = __p1; \
   int32x2_t __s2 = __p2; \
   int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, (int8x8_t)__s2, __p3); \
+  __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, __s2, __p3); \
   __ret; \
 })
 #else
@@ -52110,7 +52110,7 @@
   int32x2_t __s2 = __p2; \
   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
   int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, (int8x8_t)__rev2, __p3); \
+  __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, __rev2, __p3); \
   __ret; \
 })
 #endif
@@ -52121,7 +52121,7 @@
   int16_t __s1 = __p1; \
   int16x4_t __s2 = __p2; \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, (int8x8_t)__s2, __p3); \
+  __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, __s2, __p3); \
   __ret; \
 })
 #else
@@ -52131,7 +52131,7 @@
   int16x4_t __s2 = __p2; \
   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, (int8x8_t)__rev2, __p3); \
+  __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, __rev2, __p3); \
   __ret; \
 })
 #endif
@@ -52142,7 +52142,7 @@
   int32_t __s1 = __p1; \
   int32x4_t __s2 = __p2; \
   int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, (int8x16_t)__s2, __p3); \
+  __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, __s2, __p3); \
   __ret; \
 })
 #else
@@ -52152,7 +52152,7 @@
   int32x4_t __s2 = __p2; \
   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
   int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, (int8x16_t)__rev2, __p3); \
+  __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, __rev2, __p3); \
   __ret; \
 })
 #endif
@@ -52163,7 +52163,7 @@
   int16_t __s1 = __p1; \
   int16x8_t __s2 = __p2; \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, (int8x16_t)__s2, __p3); \
+  __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, __s2, __p3); \
   __ret; \
 })
 #else
@@ -52173,7 +52173,7 @@
   int16x8_t __s2 = __p2; \
   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, (int8x16_t)__rev2, __p3); \
+  __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, __rev2, __p3); \
   __ret; \
 })
 #endif
@@ -52408,7 +52408,7 @@
   int32_t __s1 = __p1; \
   int32x2_t __s2 = __p2; \
   int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, (int8x8_t)__s2, __p3); \
+  __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, __s2, __p3); \
   __ret; \
 })
 #else
@@ -52418,7 +52418,7 @@
   int32x2_t __s2 = __p2; \
   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
   int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, (int8x8_t)__rev2, __p3); \
+  __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, __rev2, __p3); \
   __ret; \
 })
 #endif
@@ -52429,7 +52429,7 @@
   int16_t __s1 = __p1; \
   int16x4_t __s2 = __p2; \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, (int8x8_t)__s2, __p3); \
+  __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, __s2, __p3); \
   __ret; \
 })
 #else
@@ -52439,7 +52439,7 @@
   int16x4_t __s2 = __p2; \
   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, (int8x8_t)__rev2, __p3); \
+  __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, __rev2, __p3); \
   __ret; \
 })
 #endif
@@ -52450,7 +52450,7 @@
   int32_t __s1 = __p1; \
   int32x4_t __s2 = __p2; \
   int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, (int8x16_t)__s2, __p3); \
+  __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, __s2, __p3); \
   __ret; \
 })
 #else
@@ -52460,7 +52460,7 @@
   int32x4_t __s2 = __p2; \
   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
   int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, (int8x16_t)__rev2, __p3); \
+  __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, __rev2, __p3); \
   __ret; \
 })
 #endif
@@ -52471,7 +52471,7 @@
   int16_t __s1 = __p1; \
   int16x8_t __s2 = __p2; \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, (int8x16_t)__s2, __p3); \
+  __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, __s2, __p3); \
   __ret; \
 })
 #else
@@ -52481,7 +52481,7 @@
   int16x8_t __s2 = __p2; \
   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, (int8x16_t)__rev2, __p3); \
+  __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, __rev2, __p3); \
   __ret; \
 })
 #endif
@@ -55631,7 +55631,7 @@
   poly64_t __s0 = __p0; \
   poly64x1_t __s1 = __p1; \
   poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (poly64x1_t)__s1, __p2); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
@@ -55639,7 +55639,7 @@
   poly64_t __s0 = __p0; \
   poly64x2_t __s1 = __p1; \
   poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -55648,7 +55648,7 @@
   poly64x2_t __s1 = __p1; \
   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
   poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__rev1, __p2); \
+  __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
@@ -55656,7 +55656,7 @@
   poly64_t __s0 = __p0; \
   poly64x2_t __s1 = __p1; \
   poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -55666,7 +55666,7 @@
   float64_t __s0 = __p0; \
   float64x2_t __s1 = __p1; \
   float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -55675,7 +55675,7 @@
   float64x2_t __s1 = __p1; \
   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
   float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (int8x16_t)__rev1, __p2); \
+  __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
@@ -55683,7 +55683,7 @@
   float64_t __s0 = __p0; \
   float64x2_t __s1 = __p1; \
   float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -55692,7 +55692,7 @@
   float64_t __s0 = __p0; \
   float64x1_t __s1 = __p1; \
   float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (float64x1_t)__s1, __p2); \
   __ret; \
 })
 __ai uint64_t vshld_u64(uint64_t __p0, uint64_t __p1) {
@@ -56348,7 +56348,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_f64_x2(__p0, __p1) __extension__ ({ \
   float64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 42); \
+  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 42); \
 })
 #else
 #define vst1q_f64_x2(__p0, __p1) __extension__ ({ \
@@ -56356,13 +56356,13 @@
   float64x2x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 42); \
+  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 42); \
 })
 #endif
 
 #define vst1_f64_x2(__p0, __p1) __extension__ ({ \
   float64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 10); \
+  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 10); \
 })
 #define vst1_p64_x3(__p0, __p1) __extension__ ({ \
   poly64x1x3_t __s1 = __p1; \
@@ -56387,7 +56387,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_f64_x3(__p0, __p1) __extension__ ({ \
   float64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 42); \
+  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 42); \
 })
 #else
 #define vst1q_f64_x3(__p0, __p1) __extension__ ({ \
@@ -56396,13 +56396,13 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 42); \
+  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 42); \
 })
 #endif
 
 #define vst1_f64_x3(__p0, __p1) __extension__ ({ \
   float64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 10); \
+  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 10); \
 })
 #define vst1_p64_x4(__p0, __p1) __extension__ ({ \
   poly64x1x4_t __s1 = __p1; \
@@ -56428,7 +56428,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_f64_x4(__p0, __p1) __extension__ ({ \
   float64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 42); \
+  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 42); \
 })
 #else
 #define vst1q_f64_x4(__p0, __p1) __extension__ ({ \
@@ -56438,13 +56438,13 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 42); \
+  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 42); \
 })
 #endif
 
 #define vst1_f64_x4(__p0, __p1) __extension__ ({ \
   float64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 10); \
+  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 10); \
 })
 #define vst2_p64(__p0, __p1) __extension__ ({ \
   poly64x1x2_t __s1 = __p1; \
@@ -56483,7 +56483,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst2q_f64(__p0, __p1) __extension__ ({ \
   float64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 42); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 42); \
 })
 #else
 #define vst2q_f64(__p0, __p1) __extension__ ({ \
@@ -56491,14 +56491,14 @@
   float64x2x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 42); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 42); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst2q_s64(__p0, __p1) __extension__ ({ \
   int64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 35); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 35); \
 })
 #else
 #define vst2q_s64(__p0, __p1) __extension__ ({ \
@@ -56506,13 +56506,13 @@
   int64x2x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 35); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 35); \
 })
 #endif
 
 #define vst2_f64(__p0, __p1) __extension__ ({ \
   float64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 10); \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 10); \
 })
 #define vst2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
   poly64x1x2_t __s1 = __p1; \
@@ -56596,7 +56596,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
   float64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 42); \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 42); \
 })
 #else
 #define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
@@ -56604,14 +56604,14 @@
   float64x2x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 42); \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 42); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
   int64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 35); \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 35); \
 })
 #else
 #define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
@@ -56619,7 +56619,7 @@
   int64x2x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 35); \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 35); \
 })
 #endif
 
@@ -56629,11 +56629,11 @@
 })
 #define vst2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
   float64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 10); \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 10); \
 })
 #define vst2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
   int64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 3); \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 3); \
 })
 #define vst3_p64(__p0, __p1) __extension__ ({ \
   poly64x1x3_t __s1 = __p1; \
@@ -56674,7 +56674,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst3q_f64(__p0, __p1) __extension__ ({ \
   float64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 42); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 42); \
 })
 #else
 #define vst3q_f64(__p0, __p1) __extension__ ({ \
@@ -56683,14 +56683,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 42); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 42); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst3q_s64(__p0, __p1) __extension__ ({ \
   int64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 35); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 35); \
 })
 #else
 #define vst3q_s64(__p0, __p1) __extension__ ({ \
@@ -56699,13 +56699,13 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 35); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 35); \
 })
 #endif
 
 #define vst3_f64(__p0, __p1) __extension__ ({ \
   float64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 10); \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 10); \
 })
 #define vst3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
   poly64x1x3_t __s1 = __p1; \
@@ -56794,7 +56794,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
   float64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 42); \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 42); \
 })
 #else
 #define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
@@ -56803,14 +56803,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 42); \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 42); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
   int64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 35); \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 35); \
 })
 #else
 #define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
@@ -56819,7 +56819,7 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 35); \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 35); \
 })
 #endif
 
@@ -56829,11 +56829,11 @@
 })
 #define vst3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
   float64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 10); \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 10); \
 })
 #define vst3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
   int64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 3); \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 3); \
 })
 #define vst4_p64(__p0, __p1) __extension__ ({ \
   poly64x1x4_t __s1 = __p1; \
@@ -56876,7 +56876,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst4q_f64(__p0, __p1) __extension__ ({ \
   float64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 42); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 42); \
 })
 #else
 #define vst4q_f64(__p0, __p1) __extension__ ({ \
@@ -56886,14 +56886,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 42); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 42); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst4q_s64(__p0, __p1) __extension__ ({ \
   int64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 35); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 35); \
 })
 #else
 #define vst4q_s64(__p0, __p1) __extension__ ({ \
@@ -56903,13 +56903,13 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 35); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 35); \
 })
 #endif
 
 #define vst4_f64(__p0, __p1) __extension__ ({ \
   float64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 10); \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 10); \
 })
 #define vst4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
   poly64x1x4_t __s1 = __p1; \
@@ -57003,7 +57003,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
   float64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 42); \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 42); \
 })
 #else
 #define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
@@ -57013,14 +57013,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 42); \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 42); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
   int64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 35); \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 35); \
 })
 #else
 #define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
@@ -57030,7 +57030,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 35); \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 35); \
 })
 #endif
 
@@ -57040,11 +57040,11 @@
 })
 #define vst4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
   float64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 10); \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 10); \
 })
 #define vst4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
   int64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 3); \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 3); \
 })
 #define vstrq_p128(__p0, __p1) __extension__ ({ \
   poly128_t __s1 = __p1; \
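
For context (illustrative, not part of the patch): the arm_neon.h hunks above stop funnelling arguments through generic int8x8_t/int8x16_t casts for the across-lane reductions, the saturating-multiply lane forms, and vset_lane, passing each vector with its own type instead, while the vst1/vst2/vst3/vst4 wrappers gain explicit (int8x8_t)/(int8x16_t) casts on their data operands, presumably because the generic *_v store builtins take byte vectors. A minimal sketch of two affected intrinsics, assuming an AArch64 target:

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  /* Across-lane minimum: the updated header passes v straight to
     __builtin_neon_vminv_s8 instead of casting it to itself. */
  int8x8_t v = {3, -7, 12, 0, 5, -1, 9, 4};
  printf("min = %d\n", (int)vminv_s8(v));                    /* -7 */

  /* Interleaved store: vst2q_f64 expands to __builtin_neon_vst2q_v,
     whose data operands are now cast to int8x16_t explicitly. */
  float64x2x2_t pair = { { vdupq_n_f64(1.0), vdupq_n_f64(2.0) } };
  double out[4];
  vst2q_f64(out, pair);
  printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);   /* 1 2 1 2 */
  return 0;
}
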
diff --git a/darwin-x86/lib64/clang/10.0.1/include/armintr.h b/darwin-x86/lib64/clang/10.0.5/include/armintr.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/armintr.h
rename to darwin-x86/lib64/clang/10.0.5/include/armintr.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/avx2intrin.h b/darwin-x86/lib64/clang/10.0.5/include/avx2intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/avx2intrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/avx2intrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/avx512bf16intrin.h b/darwin-x86/lib64/clang/10.0.5/include/avx512bf16intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/avx512bf16intrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/avx512bf16intrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/avx512bitalgintrin.h b/darwin-x86/lib64/clang/10.0.5/include/avx512bitalgintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/avx512bitalgintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/avx512bitalgintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/avx512bwintrin.h b/darwin-x86/lib64/clang/10.0.5/include/avx512bwintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/avx512bwintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/avx512bwintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/avx512cdintrin.h b/darwin-x86/lib64/clang/10.0.5/include/avx512cdintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/avx512cdintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/avx512cdintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/avx512dqintrin.h b/darwin-x86/lib64/clang/10.0.5/include/avx512dqintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/avx512dqintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/avx512dqintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/avx512erintrin.h b/darwin-x86/lib64/clang/10.0.5/include/avx512erintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/avx512erintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/avx512erintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/avx512fintrin.h b/darwin-x86/lib64/clang/10.0.5/include/avx512fintrin.h
similarity index 99%
rename from darwin-x86/lib64/clang/10.0.1/include/avx512fintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/avx512fintrin.h
index 4e341a1..698e477 100644
--- a/darwin-x86/lib64/clang/10.0.1/include/avx512fintrin.h
+++ b/darwin-x86/lib64/clang/10.0.5/include/avx512fintrin.h
@@ -7658,13 +7658,13 @@
 #define _mm512_i32gather_ps(index, addr, scale) \
   (__m512)__builtin_ia32_gathersiv16sf((__v16sf)_mm512_undefined_ps(), \
                                        (void const *)(addr), \
-                                       (__v16sf)(__m512)(index), \
+                                       (__v16si)(__m512)(index), \
                                        (__mmask16)-1, (int)(scale))
 
 #define _mm512_mask_i32gather_ps(v1_old, mask, index, addr, scale) \
   (__m512)__builtin_ia32_gathersiv16sf((__v16sf)(__m512)(v1_old), \
                                        (void const *)(addr), \
-                                       (__v16sf)(__m512)(index), \
+                                       (__v16si)(__m512)(index), \
                                        (__mmask16)(mask), (int)(scale))
 
 #define _mm512_i32gather_epi32(index, addr, scale) \
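
For context (illustrative, not part of the patch): the avx512fintrin.h hunk corrects the index operand of the _mm512_i32gather_ps macros to be cast to __v16si, the 16 x i32 vector type the gather builtin expects, instead of the float vector type __v16sf. A minimal sketch, assuming an AVX-512F target (e.g. built with -mavx512f):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  float table[16];
  for (int i = 0; i < 16; ++i) table[i] = (float)i;

  /* The index operand is a vector of 16 32-bit integers, which is why the
     macro must cast it to __v16si rather than __v16sf. */
  __m512i idx = _mm512_setr_epi32(15, 14, 13, 12, 11, 10, 9, 8,
                                  7, 6, 5, 4, 3, 2, 1, 0);
  __m512 gathered = _mm512_i32gather_ps(idx, table, 4);      /* scale in bytes */

  float out[16];
  _mm512_storeu_ps(out, gathered);
  printf("%g %g ... %g\n", out[0], out[1], out[15]);          /* 15 14 ... 0 */
  return 0;
}
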
diff --git a/darwin-x86/lib64/clang/10.0.1/include/avx512ifmaintrin.h b/darwin-x86/lib64/clang/10.0.5/include/avx512ifmaintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/avx512ifmaintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/avx512ifmaintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/avx512ifmavlintrin.h b/darwin-x86/lib64/clang/10.0.5/include/avx512ifmavlintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/avx512ifmavlintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/avx512ifmavlintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/avx512pfintrin.h b/darwin-x86/lib64/clang/10.0.5/include/avx512pfintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/avx512pfintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/avx512pfintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/avx512vbmi2intrin.h b/darwin-x86/lib64/clang/10.0.5/include/avx512vbmi2intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/avx512vbmi2intrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/avx512vbmi2intrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/avx512vbmiintrin.h b/darwin-x86/lib64/clang/10.0.5/include/avx512vbmiintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/avx512vbmiintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/avx512vbmiintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/avx512vbmivlintrin.h b/darwin-x86/lib64/clang/10.0.5/include/avx512vbmivlintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/avx512vbmivlintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/avx512vbmivlintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/avx512vlbf16intrin.h b/darwin-x86/lib64/clang/10.0.5/include/avx512vlbf16intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/avx512vlbf16intrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/avx512vlbf16intrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/avx512vlbitalgintrin.h b/darwin-x86/lib64/clang/10.0.5/include/avx512vlbitalgintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/avx512vlbitalgintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/avx512vlbitalgintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/avx512vlbwintrin.h b/darwin-x86/lib64/clang/10.0.5/include/avx512vlbwintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/avx512vlbwintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/avx512vlbwintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/avx512vlcdintrin.h b/darwin-x86/lib64/clang/10.0.5/include/avx512vlcdintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/avx512vlcdintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/avx512vlcdintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/avx512vldqintrin.h b/darwin-x86/lib64/clang/10.0.5/include/avx512vldqintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/avx512vldqintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/avx512vldqintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/avx512vlintrin.h b/darwin-x86/lib64/clang/10.0.5/include/avx512vlintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/avx512vlintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/avx512vlintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/avx512vlvbmi2intrin.h b/darwin-x86/lib64/clang/10.0.5/include/avx512vlvbmi2intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/avx512vlvbmi2intrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/avx512vlvbmi2intrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/avx512vlvnniintrin.h b/darwin-x86/lib64/clang/10.0.5/include/avx512vlvnniintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/avx512vlvnniintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/avx512vlvnniintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/avx512vlvp2intersectintrin.h b/darwin-x86/lib64/clang/10.0.5/include/avx512vlvp2intersectintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/avx512vlvp2intersectintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/avx512vlvp2intersectintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/avx512vnniintrin.h b/darwin-x86/lib64/clang/10.0.5/include/avx512vnniintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/avx512vnniintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/avx512vnniintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/avx512vp2intersectintrin.h b/darwin-x86/lib64/clang/10.0.5/include/avx512vp2intersectintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/avx512vp2intersectintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/avx512vp2intersectintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/avx512vpopcntdqintrin.h b/darwin-x86/lib64/clang/10.0.5/include/avx512vpopcntdqintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/avx512vpopcntdqintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/avx512vpopcntdqintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/avx512vpopcntdqvlintrin.h b/darwin-x86/lib64/clang/10.0.5/include/avx512vpopcntdqvlintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/avx512vpopcntdqvlintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/avx512vpopcntdqvlintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/avxintrin.h b/darwin-x86/lib64/clang/10.0.5/include/avxintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/avxintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/avxintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/bits/stdatomic.h b/darwin-x86/lib64/clang/10.0.5/include/bits/stdatomic.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/bits/stdatomic.h
rename to darwin-x86/lib64/clang/10.0.5/include/bits/stdatomic.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/bmi2intrin.h b/darwin-x86/lib64/clang/10.0.5/include/bmi2intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/bmi2intrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/bmi2intrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/bmiintrin.h b/darwin-x86/lib64/clang/10.0.5/include/bmiintrin.h
similarity index 98%
rename from darwin-x86/lib64/clang/10.0.1/include/bmiintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/bmiintrin.h
index b7af62f..841bd84 100644
--- a/darwin-x86/lib64/clang/10.0.1/include/bmiintrin.h
+++ b/darwin-x86/lib64/clang/10.0.5/include/bmiintrin.h
@@ -14,27 +14,13 @@
 #ifndef __BMIINTRIN_H
 #define __BMIINTRIN_H
 
-#define _tzcnt_u16(a)     (__tzcnt_u16((a)))
-
-#define _andn_u32(a, b)   (__andn_u32((a), (b)))
-
-/* _bextr_u32 != __bextr_u32 */
-#define _blsi_u32(a)      (__blsi_u32((a)))
-
-#define _blsmsk_u32(a)    (__blsmsk_u32((a)))
-
-#define _blsr_u32(a)      (__blsr_u32((a)))
-
-#define _tzcnt_u32(a)     (__tzcnt_u32((a)))
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi")))
-
 /* Allow using the tzcnt intrinsics even for non-BMI targets. Since the TZCNT
    instruction behaves as BSF on non-BMI targets, there is code that expects
    to use it as a potentially faster version of BSF. */
 #define __RELAXED_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
 
+#define _tzcnt_u16(a)     (__tzcnt_u16((a)))
+
 /// Counts the number of trailing zero bits in the operand.
 ///
 /// \headerfile <x86intrin.h>
@@ -51,6 +37,94 @@
   return __builtin_ia32_tzcnt_u16(__X);
 }
 
+/// Counts the number of trailing zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
+///
+/// \param __X
+///    An unsigned 32-bit integer whose trailing zeros are to be counted.
+/// \returns An unsigned 32-bit integer containing the number of trailing zero
+///    bits in the operand.
+static __inline__ unsigned int __RELAXED_FN_ATTRS
+__tzcnt_u32(unsigned int __X)
+{
+  return __builtin_ia32_tzcnt_u32(__X);
+}
+
+/// Counts the number of trailing zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
+///
+/// \param __X
+///    An unsigned 32-bit integer whose trailing zeros are to be counted.
+/// \returns A 32-bit integer containing the number of trailing zero bits in
+///    the operand.
+static __inline__ int __RELAXED_FN_ATTRS
+_mm_tzcnt_32(unsigned int __X)
+{
+  return __builtin_ia32_tzcnt_u32(__X);
+}
+
+#define _tzcnt_u32(a)     (__tzcnt_u32((a)))
+
+#ifdef __x86_64__
+
+/// Counts the number of trailing zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
+///
+/// \param __X
+///    An unsigned 64-bit integer whose trailing zeros are to be counted.
+/// \returns An unsigned 64-bit integer containing the number of trailing zero
+///    bits in the operand.
+static __inline__ unsigned long long __RELAXED_FN_ATTRS
+__tzcnt_u64(unsigned long long __X)
+{
+  return __builtin_ia32_tzcnt_u64(__X);
+}
+
+/// Counts the number of trailing zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
+///
+/// \param __X
+///    An unsigned 64-bit integer whose trailing zeros are to be counted.
+/// \returns A 64-bit integer containing the number of trailing zero bits in
+///    the operand.
+static __inline__ long long __RELAXED_FN_ATTRS
+_mm_tzcnt_64(unsigned long long __X)
+{
+  return __builtin_ia32_tzcnt_u64(__X);
+}
+
+#define _tzcnt_u64(a)     (__tzcnt_u64((a)))
+
+#endif /* __x86_64__ */
+
+#undef __RELAXED_FN_ATTRS
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI__)
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi")))
+
+#define _andn_u32(a, b)   (__andn_u32((a), (b)))
+
+/* _bextr_u32 != __bextr_u32 */
+#define _blsi_u32(a)      (__blsi_u32((a)))
+
+#define _blsmsk_u32(a)    (__blsmsk_u32((a)))
+
+#define _blsr_u32(a)      (__blsr_u32((a)))
+
 /// Performs a bitwise AND of the second operand with the one's
 ///    complement of the first operand.
 ///
@@ -169,38 +243,6 @@
   return __X & (__X - 1);
 }
 
-/// Counts the number of trailing zero bits in the operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
-///
-/// \param __X
-///    An unsigned 32-bit integer whose trailing zeros are to be counted.
-/// \returns An unsigned 32-bit integer containing the number of trailing zero
-///    bits in the operand.
-static __inline__ unsigned int __RELAXED_FN_ATTRS
-__tzcnt_u32(unsigned int __X)
-{
-  return __builtin_ia32_tzcnt_u32(__X);
-}
-
-/// Counts the number of trailing zero bits in the operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
-///
-/// \param __X
-///    An unsigned 32-bit integer whose trailing zeros are to be counted.
-/// \returns An 32-bit integer containing the number of trailing zero bits in
-///    the operand.
-static __inline__ int __RELAXED_FN_ATTRS
-_mm_tzcnt_32(unsigned int __X)
-{
-  return __builtin_ia32_tzcnt_u32(__X);
-}
-
 #ifdef __x86_64__
 
 #define _andn_u64(a, b)   (__andn_u64((a), (b)))
@@ -212,8 +254,6 @@
 
 #define _blsr_u64(a)      (__blsr_u64((a)))
 
-#define _tzcnt_u64(a)     (__tzcnt_u64((a)))
-
 /// Performs a bitwise AND of the second operand with the one's
 ///    complement of the first operand.
 ///
@@ -332,41 +372,10 @@
   return __X & (__X - 1);
 }
 
-/// Counts the number of trailing zero bits in the operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
-///
-/// \param __X
-///    An unsigned 64-bit integer whose trailing zeros are to be counted.
-/// \returns An unsigned 64-bit integer containing the number of trailing zero
-///    bits in the operand.
-static __inline__ unsigned long long __RELAXED_FN_ATTRS
-__tzcnt_u64(unsigned long long __X)
-{
-  return __builtin_ia32_tzcnt_u64(__X);
-}
-
-/// Counts the number of trailing zero bits in the operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
-///
-/// \param __X
-///    An unsigned 64-bit integer whose trailing zeros are to be counted.
-/// \returns An 64-bit integer containing the number of trailing zero bits in
-///    the operand.
-static __inline__ long long __RELAXED_FN_ATTRS
-_mm_tzcnt_64(unsigned long long __X)
-{
-  return __builtin_ia32_tzcnt_u64(__X);
-}
-
 #endif /* __x86_64__ */
 
 #undef __DEFAULT_FN_ATTRS
-#undef __RELAXED_FN_ATTRS
+
+#endif /* !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI__) */
 
 #endif /* __BMIINTRIN_H */
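
For context (illustrative, not part of the patch): the bmiintrin.h rework above hoists the TZCNT wrappers (__tzcnt_u16/u32/u64, _mm_tzcnt_32/64 and the matching _tzcnt_* macros) in front of the feature gate and wraps only the remaining BMI intrinsics in the !_MSC_VER / modules / __BMI__ check, so trailing-zero counting stays usable without -mbmi; on pre-BMI CPUs the TZCNT encoding simply executes as BSF. A minimal sketch:

#include <x86intrin.h>
#include <stdio.h>

int main(void) {
  unsigned int x = 0x00080000u;          /* bit 19 set */
  printf("%u\n", __tzcnt_u32(x));        /* 19 */
  printf("%d\n", _mm_tzcnt_32(0));       /* 32 on TZCNT-capable CPUs; plain
                                            BSF leaves the result undefined
                                            for a zero input */
  return 0;
}
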
diff --git a/darwin-x86/lib64/clang/10.0.1/include/cetintrin.h b/darwin-x86/lib64/clang/10.0.5/include/cetintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/cetintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/cetintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/cldemoteintrin.h b/darwin-x86/lib64/clang/10.0.5/include/cldemoteintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/cldemoteintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/cldemoteintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/clflushoptintrin.h b/darwin-x86/lib64/clang/10.0.5/include/clflushoptintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/clflushoptintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/clflushoptintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/clwbintrin.h b/darwin-x86/lib64/clang/10.0.5/include/clwbintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/clwbintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/clwbintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/clzerointrin.h b/darwin-x86/lib64/clang/10.0.5/include/clzerointrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/clzerointrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/clzerointrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/cpuid.h b/darwin-x86/lib64/clang/10.0.5/include/cpuid.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/cpuid.h
rename to darwin-x86/lib64/clang/10.0.5/include/cpuid.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/cuda_wrappers/algorithm b/darwin-x86/lib64/clang/10.0.5/include/cuda_wrappers/algorithm
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/cuda_wrappers/algorithm
rename to darwin-x86/lib64/clang/10.0.5/include/cuda_wrappers/algorithm
diff --git a/darwin-x86/lib64/clang/10.0.1/include/cuda_wrappers/complex b/darwin-x86/lib64/clang/10.0.5/include/cuda_wrappers/complex
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/cuda_wrappers/complex
rename to darwin-x86/lib64/clang/10.0.5/include/cuda_wrappers/complex
diff --git a/darwin-x86/lib64/clang/10.0.1/include/cuda_wrappers/new b/darwin-x86/lib64/clang/10.0.5/include/cuda_wrappers/new
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/cuda_wrappers/new
rename to darwin-x86/lib64/clang/10.0.5/include/cuda_wrappers/new
diff --git a/darwin-x86/lib64/clang/10.0.1/include/emmintrin.h b/darwin-x86/lib64/clang/10.0.5/include/emmintrin.h
similarity index 99%
rename from darwin-x86/lib64/clang/10.0.1/include/emmintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/emmintrin.h
index c8fefdf..f98b7e7 100644
--- a/darwin-x86/lib64/clang/10.0.1/include/emmintrin.h
+++ b/darwin-x86/lib64/clang/10.0.5/include/emmintrin.h
@@ -2288,7 +2288,7 @@
   return (__m128i)__builtin_ia32_paddusw128((__v8hi)__a, (__v8hi)__b);
 }
 
-/// Computes the rounded avarages of corresponding elements of two
+/// Computes the rounded averages of corresponding elements of two
 ///    128-bit unsigned [16 x i8] vectors, saving each result in the
 ///    corresponding element of a 128-bit result vector of [16 x i8].
 ///
@@ -2308,7 +2308,7 @@
   return (__m128i)__builtin_ia32_pavgb128((__v16qi)__a, (__v16qi)__b);
 }
 
-/// Computes the rounded avarages of corresponding elements of two
+/// Computes the rounded averages of corresponding elements of two
 ///    128-bit unsigned [8 x i16] vectors, saving each result in the
 ///    corresponding element of a 128-bit result vector of [8 x i16].
 ///
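
For context (illustrative, not part of the patch): besides the spelling fix, the surrounding doxygen describes PAVGB/PAVGW, whose "rounded average" is (a + b + 1) >> 1 per unsigned lane. A minimal sketch, assuming any SSE2-capable x86 target:

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  __m128i a = _mm_set1_epi8((char)250);
  __m128i b = _mm_set1_epi8((char)251);
  __m128i avg = _mm_avg_epu8(a, b);      /* each lane: (250 + 251 + 1) >> 1 = 251 */
  unsigned char out[16];
  _mm_storeu_si128((__m128i *)out, avg);
  printf("%u\n", (unsigned)out[0]);      /* 251 */
  return 0;
}
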
diff --git a/darwin-x86/lib64/clang/10.0.1/include/enqcmdintrin.h b/darwin-x86/lib64/clang/10.0.5/include/enqcmdintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/enqcmdintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/enqcmdintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/f16cintrin.h b/darwin-x86/lib64/clang/10.0.5/include/f16cintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/f16cintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/f16cintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/float.h b/darwin-x86/lib64/clang/10.0.5/include/float.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/float.h
rename to darwin-x86/lib64/clang/10.0.5/include/float.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/fma4intrin.h b/darwin-x86/lib64/clang/10.0.5/include/fma4intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/fma4intrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/fma4intrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/fmaintrin.h b/darwin-x86/lib64/clang/10.0.5/include/fmaintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/fmaintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/fmaintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/fuzzer/FuzzedDataProvider.h b/darwin-x86/lib64/clang/10.0.5/include/fuzzer/FuzzedDataProvider.h
similarity index 98%
rename from darwin-x86/lib64/clang/10.0.1/include/fuzzer/FuzzedDataProvider.h
rename to darwin-x86/lib64/clang/10.0.5/include/fuzzer/FuzzedDataProvider.h
index 59ef284..fd895b7 100644
--- a/darwin-x86/lib64/clang/10.0.1/include/fuzzer/FuzzedDataProvider.h
+++ b/darwin-x86/lib64/clang/10.0.5/include/fuzzer/FuzzedDataProvider.h
@@ -196,7 +196,7 @@
     // Use different integral types for different floating point types in order
     // to provide better density of the resulting values.
     using IntegralType =
-        typename std::conditional<sizeof(T) <= sizeof(uint32_t), uint32_t,
+        typename std::conditional<(sizeof(T) <= sizeof(uint32_t)), uint32_t,
                                   uint64_t>::type;
 
     T result = static_cast<T>(ConsumeIntegral<IntegralType>());
@@ -284,9 +284,9 @@
 
     // Avoid using implementation-defined unsigned to signed conversions.
     // To learn more, see https://stackoverflow.com/questions/13150449.
-    if (value <= std::numeric_limits<TS>::max())
+    if (value <= std::numeric_limits<TS>::max()) {
       return static_cast<TS>(value);
-    else {
+    } else {
       constexpr auto TS_min = std::numeric_limits<TS>::min();
       return TS_min + static_cast<char>(value - TS_min);
     }
diff --git a/darwin-x86/lib64/clang/10.0.1/include/fxsrintrin.h b/darwin-x86/lib64/clang/10.0.5/include/fxsrintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/fxsrintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/fxsrintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/gfniintrin.h b/darwin-x86/lib64/clang/10.0.5/include/gfniintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/gfniintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/gfniintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/htmintrin.h b/darwin-x86/lib64/clang/10.0.5/include/htmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/htmintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/htmintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/htmxlintrin.h b/darwin-x86/lib64/clang/10.0.5/include/htmxlintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/htmxlintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/htmxlintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/ia32intrin.h b/darwin-x86/lib64/clang/10.0.5/include/ia32intrin.h
similarity index 83%
rename from darwin-x86/lib64/clang/10.0.1/include/ia32intrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/ia32intrin.h
index 8e38df7..79b7f06 100644
--- a/darwin-x86/lib64/clang/10.0.1/include/ia32intrin.h
+++ b/darwin-x86/lib64/clang/10.0.5/include/ia32intrin.h
@@ -195,6 +195,74 @@
 }
 #endif /* !__x86_64__ */
 
+/** Cast a 32-bit float value to a 32-bit unsigned integer value
+ *
+ *  \headerfile <x86intrin.h>
+ *  This intrinsic corresponds to the <c> VMOVD / MOVD </c> instruction in x86_64,
+ *  and corresponds to the <c> VMOVL / MOVL </c> instruction in ia32.
+ *
+ *  \param __A
+ *     A 32-bit float value.
+ *  \returns a 32-bit unsigned integer containing the converted value.
+ */
+static __inline__ unsigned int __attribute__((__always_inline__))
+_castf32_u32(float __A) {
+  unsigned int D;
+  __builtin_memcpy(&D, &__A, sizeof(__A));
+  return D;
+}
+
+/** Cast a 64-bit float value to a 64-bit unsigned integer value
+ *
+ *  \headerfile <x86intrin.h>
+ *  This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction in x86_64,
+ *  and corresponds to the <c> VMOVL / MOVL </c> instruction in ia32.
+ *
+ *  \param __A
+ *     A 64-bit float value.
+ *  \returns a 64-bit unsigned integer containing the converted value.
+ */
+static __inline__ unsigned long long __attribute__((__always_inline__))
+_castf64_u64(double __A) {
+  unsigned long long D;
+  __builtin_memcpy(&D, &__A, sizeof(__A));
+  return D;
+}
+
+/** Cast a 32-bit unsigned integer value to a 32-bit float value
+ *
+ *  \headerfile <x86intrin.h>
+ *  This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction in x86_64,
+ *  and corresponds to the <c> FLDS </c> instruction in ia32.
+ *
+ *  \param __A
+ *     A 32-bit unsigned integer value.
+ *  \returns a 32-bit float value containing the converted value.
+ */
+static __inline__ float __attribute__((__always_inline__))
+_castu32_f32(unsigned int __A) {
+  float D;
+  __builtin_memcpy(&D, &__A, sizeof(__A));
+  return D;
+}
+
+/** Cast a 64-bit unsigned integer value to a 64-bit float value
+ *
+ *  \headerfile <x86intrin.h>
+ *  This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction in x86_64,
+ *  and corresponds to the <c> FLDL </c> instruction in ia32.
+ *
+ *  \param __A
+ *     A 64-bit unsigned integer value.
+ *  \returns a 64-bit float value containing the converted value.
+ */
+static __inline__ double __attribute__((__always_inline__))
+_castu64_f64(unsigned long long __A) {
+  double D;
+  __builtin_memcpy(&D, &__A, sizeof(__A));
+  return D;
+}
+
 /** Adds the unsigned integer operand to the CRC-32C checksum of the
  *     unsigned char operand.
  *
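
The four _cast* helpers added above are plain bit copies implemented with __builtin_memcpy. A minimal usage sketch (an illustration assuming an x86 target built against these 10.0.5 headers; not part of the diff):

    #include <x86intrin.h>
    #include <stdio.h>

    int main(void) {
      float f = -0.0f;
      unsigned int bits = _castf32_u32(f);   /* 0x80000000: only the sign bit set */
      float back = _castu32_f32(bits);       /* identical bit pattern, so back is -0.0f */
      printf("bits=%08x value=%g\n", bits, back);
      return 0;
    }
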
diff --git a/darwin-x86/lib64/clang/10.0.1/include/immintrin.h b/darwin-x86/lib64/clang/10.0.5/include/immintrin.h
similarity index 99%
rename from darwin-x86/lib64/clang/10.0.1/include/immintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/immintrin.h
index 7555ad8..ae900ee 100644
--- a/darwin-x86/lib64/clang/10.0.1/include/immintrin.h
+++ b/darwin-x86/lib64/clang/10.0.5/include/immintrin.h
@@ -64,9 +64,8 @@
 #include <vpclmulqdqintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI__)
+/* No feature check desired due to internal checks */
 #include <bmiintrin.h>
-#endif
 
 #if !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI2__)
 #include <bmi2intrin.h>
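
With the guard removed, <bmiintrin.h> is now included unconditionally and the header's own internal checks decide which intrinsics still require the bmi target. A rough sketch of what that enables (an assumption about the resulting behaviour, not text from the diff): the TZCNT helpers become usable without building with -mbmi, because the TZCNT encoding executes as BSF on CPUs that lack BMI.

    #include <immintrin.h>

    /* trailing_zeros is a hypothetical helper. Usable even when the translation
       unit is not compiled with -mbmi; on CPUs without BMI the instruction
       executes as BSF, so the result for x == 0 is unspecified. */
    unsigned trailing_zeros(unsigned x) {
      return _tzcnt_u32(x);
    }
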
diff --git a/darwin-x86/lib64/clang/10.0.1/include/intrin.h b/darwin-x86/lib64/clang/10.0.5/include/intrin.h
similarity index 97%
rename from darwin-x86/lib64/clang/10.0.1/include/intrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/intrin.h
index 9786ba1..a73a576 100644
--- a/darwin-x86/lib64/clang/10.0.1/include/intrin.h
+++ b/darwin-x86/lib64/clang/10.0.5/include/intrin.h
@@ -36,6 +36,12 @@
 /* Define the default attributes for the functions in this file. */
 #define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
 
+#if __x86_64__
+#define __LPTRINT_TYPE__ __int64
+#else
+#define __LPTRINT_TYPE__ long
+#endif
+
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -94,8 +100,7 @@
 void __outwordstring(unsigned short, unsigned short *, unsigned long);
 unsigned long __readcr0(void);
 unsigned long __readcr2(void);
-static __inline__
-unsigned long __readcr3(void);
+unsigned __LPTRINT_TYPE__ __readcr3(void);
 unsigned long __readcr4(void);
 unsigned long __readcr8(void);
 unsigned int __readdr(unsigned int);
@@ -132,7 +137,7 @@
 void __wbinvd(void);
 void __writecr0(unsigned int);
 static __inline__
-void __writecr3(unsigned int);
+void __writecr3(unsigned __INTPTR_TYPE__);
 void __writecr4(unsigned int);
 void __writecr8(unsigned int);
 void __writedr(unsigned int, unsigned int);
@@ -565,24 +570,26 @@
   __asm__ ("rdmsr" : "=d"(__edx), "=a"(__eax) : "c"(__register));
   return (((unsigned __int64)__edx) << 32) | (unsigned __int64)__eax;
 }
+#endif
 
-static __inline__ unsigned long __DEFAULT_FN_ATTRS
+static __inline__ unsigned __LPTRINT_TYPE__ __DEFAULT_FN_ATTRS
 __readcr3(void) {
-  unsigned long __cr3_val;
-  __asm__ __volatile__ ("mov %%cr3, %0" : "=q"(__cr3_val) : : "memory");
+  unsigned __LPTRINT_TYPE__ __cr3_val;
+  __asm__ __volatile__ ("mov %%cr3, %0" : "=r"(__cr3_val) : : "memory");
   return __cr3_val;
 }
 
 static __inline__ void __DEFAULT_FN_ATTRS
-__writecr3(unsigned int __cr3_val) {
-  __asm__ ("mov %0, %%cr3" : : "q"(__cr3_val) : "memory");
+__writecr3(unsigned __INTPTR_TYPE__ __cr3_val) {
+  __asm__ ("mov %0, %%cr3" : : "r"(__cr3_val) : "memory");
 }
-#endif
 
 #ifdef __cplusplus
 }
 #endif
 
+#undef __LPTRINT_TYPE__
+
 #undef __DEFAULT_FN_ATTRS
 
 #endif /* __INTRIN_H */
diff --git a/darwin-x86/lib64/clang/10.0.1/include/inttypes.h b/darwin-x86/lib64/clang/10.0.5/include/inttypes.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/inttypes.h
rename to darwin-x86/lib64/clang/10.0.5/include/inttypes.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/invpcidintrin.h b/darwin-x86/lib64/clang/10.0.5/include/invpcidintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/invpcidintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/invpcidintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/iso646.h b/darwin-x86/lib64/clang/10.0.5/include/iso646.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/iso646.h
rename to darwin-x86/lib64/clang/10.0.5/include/iso646.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/limits.h b/darwin-x86/lib64/clang/10.0.5/include/limits.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/limits.h
rename to darwin-x86/lib64/clang/10.0.5/include/limits.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/lwpintrin.h b/darwin-x86/lib64/clang/10.0.5/include/lwpintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/lwpintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/lwpintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/lzcntintrin.h b/darwin-x86/lib64/clang/10.0.5/include/lzcntintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/lzcntintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/lzcntintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/mm3dnow.h b/darwin-x86/lib64/clang/10.0.5/include/mm3dnow.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/mm3dnow.h
rename to darwin-x86/lib64/clang/10.0.5/include/mm3dnow.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/mm_malloc.h b/darwin-x86/lib64/clang/10.0.5/include/mm_malloc.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/mm_malloc.h
rename to darwin-x86/lib64/clang/10.0.5/include/mm_malloc.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/mmintrin.h b/darwin-x86/lib64/clang/10.0.5/include/mmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/mmintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/mmintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/module.modulemap b/darwin-x86/lib64/clang/10.0.5/include/module.modulemap
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/module.modulemap
rename to darwin-x86/lib64/clang/10.0.5/include/module.modulemap
diff --git a/darwin-x86/lib64/clang/10.0.1/include/movdirintrin.h b/darwin-x86/lib64/clang/10.0.5/include/movdirintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/movdirintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/movdirintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/msa.h b/darwin-x86/lib64/clang/10.0.5/include/msa.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/msa.h
rename to darwin-x86/lib64/clang/10.0.5/include/msa.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/mwaitxintrin.h b/darwin-x86/lib64/clang/10.0.5/include/mwaitxintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/mwaitxintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/mwaitxintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/nmmintrin.h b/darwin-x86/lib64/clang/10.0.5/include/nmmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/nmmintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/nmmintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/omp-tools.h b/darwin-x86/lib64/clang/10.0.5/include/omp-tools.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/omp-tools.h
rename to darwin-x86/lib64/clang/10.0.5/include/omp-tools.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/omp.h b/darwin-x86/lib64/clang/10.0.5/include/omp.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/omp.h
rename to darwin-x86/lib64/clang/10.0.5/include/omp.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/ompt.h b/darwin-x86/lib64/clang/10.0.5/include/ompt.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/ompt.h
rename to darwin-x86/lib64/clang/10.0.5/include/ompt.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/opencl-c-base.h b/darwin-x86/lib64/clang/10.0.5/include/opencl-c-base.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/opencl-c-base.h
rename to darwin-x86/lib64/clang/10.0.5/include/opencl-c-base.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/opencl-c.h b/darwin-x86/lib64/clang/10.0.5/include/opencl-c.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/opencl-c.h
rename to darwin-x86/lib64/clang/10.0.5/include/opencl-c.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/openmp_wrappers/__clang_openmp_math.h b/darwin-x86/lib64/clang/10.0.5/include/openmp_wrappers/__clang_openmp_math.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/openmp_wrappers/__clang_openmp_math.h
rename to darwin-x86/lib64/clang/10.0.5/include/openmp_wrappers/__clang_openmp_math.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/openmp_wrappers/__clang_openmp_math_declares.h b/darwin-x86/lib64/clang/10.0.5/include/openmp_wrappers/__clang_openmp_math_declares.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/openmp_wrappers/__clang_openmp_math_declares.h
rename to darwin-x86/lib64/clang/10.0.5/include/openmp_wrappers/__clang_openmp_math_declares.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/openmp_wrappers/cmath b/darwin-x86/lib64/clang/10.0.5/include/openmp_wrappers/cmath
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/openmp_wrappers/cmath
rename to darwin-x86/lib64/clang/10.0.5/include/openmp_wrappers/cmath
diff --git a/darwin-x86/lib64/clang/10.0.1/include/openmp_wrappers/math.h b/darwin-x86/lib64/clang/10.0.5/include/openmp_wrappers/math.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/openmp_wrappers/math.h
rename to darwin-x86/lib64/clang/10.0.5/include/openmp_wrappers/math.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/pconfigintrin.h b/darwin-x86/lib64/clang/10.0.5/include/pconfigintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/pconfigintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/pconfigintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/pkuintrin.h b/darwin-x86/lib64/clang/10.0.5/include/pkuintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/pkuintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/pkuintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/pmmintrin.h b/darwin-x86/lib64/clang/10.0.5/include/pmmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/pmmintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/pmmintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/popcntintrin.h b/darwin-x86/lib64/clang/10.0.5/include/popcntintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/popcntintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/popcntintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/ppc_wrappers/emmintrin.h b/darwin-x86/lib64/clang/10.0.5/include/ppc_wrappers/emmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/ppc_wrappers/emmintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/ppc_wrappers/emmintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/ppc_wrappers/mm_malloc.h b/darwin-x86/lib64/clang/10.0.5/include/ppc_wrappers/mm_malloc.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/ppc_wrappers/mm_malloc.h
rename to darwin-x86/lib64/clang/10.0.5/include/ppc_wrappers/mm_malloc.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/ppc_wrappers/mmintrin.h b/darwin-x86/lib64/clang/10.0.5/include/ppc_wrappers/mmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/ppc_wrappers/mmintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/ppc_wrappers/mmintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/ppc_wrappers/pmmintrin.h b/darwin-x86/lib64/clang/10.0.5/include/ppc_wrappers/pmmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/ppc_wrappers/pmmintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/ppc_wrappers/pmmintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/ppc_wrappers/smmintrin.h b/darwin-x86/lib64/clang/10.0.5/include/ppc_wrappers/smmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/ppc_wrappers/smmintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/ppc_wrappers/smmintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/ppc_wrappers/tmmintrin.h b/darwin-x86/lib64/clang/10.0.5/include/ppc_wrappers/tmmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/ppc_wrappers/tmmintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/ppc_wrappers/tmmintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/ppc_wrappers/xmmintrin.h b/darwin-x86/lib64/clang/10.0.5/include/ppc_wrappers/xmmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/ppc_wrappers/xmmintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/ppc_wrappers/xmmintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/prfchwintrin.h b/darwin-x86/lib64/clang/10.0.5/include/prfchwintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/prfchwintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/prfchwintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/ptwriteintrin.h b/darwin-x86/lib64/clang/10.0.5/include/ptwriteintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/ptwriteintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/ptwriteintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/rdseedintrin.h b/darwin-x86/lib64/clang/10.0.5/include/rdseedintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/rdseedintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/rdseedintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/rtmintrin.h b/darwin-x86/lib64/clang/10.0.5/include/rtmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/rtmintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/rtmintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/s390intrin.h b/darwin-x86/lib64/clang/10.0.5/include/s390intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/s390intrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/s390intrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/sanitizer/allocator_interface.h b/darwin-x86/lib64/clang/10.0.5/include/sanitizer/allocator_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/sanitizer/allocator_interface.h
rename to darwin-x86/lib64/clang/10.0.5/include/sanitizer/allocator_interface.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/sanitizer/asan_interface.h b/darwin-x86/lib64/clang/10.0.5/include/sanitizer/asan_interface.h
similarity index 98%
rename from darwin-x86/lib64/clang/10.0.1/include/sanitizer/asan_interface.h
rename to darwin-x86/lib64/clang/10.0.5/include/sanitizer/asan_interface.h
index ab2dc97..6af93aa 100644
--- a/darwin-x86/lib64/clang/10.0.1/include/sanitizer/asan_interface.h
+++ b/darwin-x86/lib64/clang/10.0.5/include/sanitizer/asan_interface.h
@@ -315,6 +315,10 @@
 /// functions like <c>_exit()</c> and <c>execl()</c>.
 void __asan_handle_no_return(void);
 
+/// Update allocation stack trace for the given allocation to the current stack
+/// trace. Returns 1 if successful, 0 if not.
+int __asan_update_allocation_context(void* addr);
+
 #ifdef __cplusplus
 }  // extern "C"
 #endif
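
A minimal call sketch for the new entry point (an illustration assuming an ASan-instrumented build, e.g. clang++ -fsanitize=address; not part of the diff). After the call, ASan attributes the allocation to the stack trace of the call site below rather than the original allocation site:

    #include <sanitizer/asan_interface.h>
    #include <cstdlib>

    int main() {
      void *p = std::malloc(16);
      int ok = __asan_update_allocation_context(p);  // 1 on success, 0 otherwise
      std::free(p);
      return ok ? 0 : 1;
    }
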
diff --git a/darwin-x86/lib64/clang/10.0.1/include/sanitizer/common_interface_defs.h b/darwin-x86/lib64/clang/10.0.5/include/sanitizer/common_interface_defs.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/sanitizer/common_interface_defs.h
rename to darwin-x86/lib64/clang/10.0.5/include/sanitizer/common_interface_defs.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/sanitizer/coverage_interface.h b/darwin-x86/lib64/clang/10.0.5/include/sanitizer/coverage_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/sanitizer/coverage_interface.h
rename to darwin-x86/lib64/clang/10.0.5/include/sanitizer/coverage_interface.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/sanitizer/dfsan_interface.h b/darwin-x86/lib64/clang/10.0.5/include/sanitizer/dfsan_interface.h
similarity index 98%
rename from darwin-x86/lib64/clang/10.0.1/include/sanitizer/dfsan_interface.h
rename to darwin-x86/lib64/clang/10.0.5/include/sanitizer/dfsan_interface.h
index c189ee5..81546e5 100644
--- a/darwin-x86/lib64/clang/10.0.1/include/sanitizer/dfsan_interface.h
+++ b/darwin-x86/lib64/clang/10.0.5/include/sanitizer/dfsan_interface.h
@@ -112,7 +112,7 @@
 }  // extern "C"
 
 template <typename T>
-void dfsan_set_label(dfsan_label label, T &data) {  // NOLINT
+void dfsan_set_label(dfsan_label label, T &data) { // NOLINT
   dfsan_set_label(label, (void *)&data, sizeof(T));
 }
 
diff --git a/darwin-x86/lib64/clang/10.0.1/include/sanitizer/hwasan_interface.h b/darwin-x86/lib64/clang/10.0.5/include/sanitizer/hwasan_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/sanitizer/hwasan_interface.h
rename to darwin-x86/lib64/clang/10.0.5/include/sanitizer/hwasan_interface.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/sanitizer/linux_syscall_hooks.h b/darwin-x86/lib64/clang/10.0.5/include/sanitizer/linux_syscall_hooks.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/sanitizer/linux_syscall_hooks.h
rename to darwin-x86/lib64/clang/10.0.5/include/sanitizer/linux_syscall_hooks.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/sanitizer/lsan_interface.h b/darwin-x86/lib64/clang/10.0.5/include/sanitizer/lsan_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/sanitizer/lsan_interface.h
rename to darwin-x86/lib64/clang/10.0.5/include/sanitizer/lsan_interface.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/sanitizer/msan_interface.h b/darwin-x86/lib64/clang/10.0.5/include/sanitizer/msan_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/sanitizer/msan_interface.h
rename to darwin-x86/lib64/clang/10.0.5/include/sanitizer/msan_interface.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/sanitizer/netbsd_syscall_hooks.h b/darwin-x86/lib64/clang/10.0.5/include/sanitizer/netbsd_syscall_hooks.h
similarity index 97%
rename from darwin-x86/lib64/clang/10.0.1/include/sanitizer/netbsd_syscall_hooks.h
rename to darwin-x86/lib64/clang/10.0.5/include/sanitizer/netbsd_syscall_hooks.h
index 27780e0..174b4bf 100644
--- a/darwin-x86/lib64/clang/10.0.1/include/sanitizer/netbsd_syscall_hooks.h
+++ b/darwin-x86/lib64/clang/10.0.5/include/sanitizer/netbsd_syscall_hooks.h
@@ -20,8 +20,8 @@
 // DO NOT EDIT! THIS FILE HAS BEEN GENERATED!
 //
 // Generated with: generate_netbsd_syscalls.awk
-// Generated date: 2018-10-30
-// Generated from: syscalls.master,v 1.293 2018/07/31 13:00:13 rjs Exp
+// Generated date: 2019-11-01
+// Generated from: syscalls.master,v 1.296 2019/09/22 22:59:39 christos Exp
 //
 //===----------------------------------------------------------------------===//
 #ifndef SANITIZER_NETBSD_SYSCALL_HOOKS_H
@@ -1839,23 +1839,24 @@
 #define __sanitizer_syscall_post_uuidgen(res, store, count)                    \
   __sanitizer_syscall_post_impl_uuidgen(res, (long long)(store),               \
                                         (long long)(count))
-#define __sanitizer_syscall_pre_getvfsstat(buf, bufsize, flags)                \
-  __sanitizer_syscall_pre_impl_getvfsstat(                                     \
+#define __sanitizer_syscall_pre_compat_90_getvfsstat(buf, bufsize, flags)      \
+  __sanitizer_syscall_pre_impl_compat_90_getvfsstat(                           \
       (long long)(buf), (long long)(bufsize), (long long)(flags))
-#define __sanitizer_syscall_post_getvfsstat(res, buf, bufsize, flags)          \
-  __sanitizer_syscall_post_impl_getvfsstat(                                    \
+#define __sanitizer_syscall_post_compat_90_getvfsstat(res, buf, bufsize,       \
+                                                      flags)                   \
+  __sanitizer_syscall_post_impl_compat_90_getvfsstat(                          \
       res, (long long)(buf), (long long)(bufsize), (long long)(flags))
-#define __sanitizer_syscall_pre_statvfs1(path, buf, flags)                     \
-  __sanitizer_syscall_pre_impl_statvfs1((long long)(path), (long long)(buf),   \
-                                        (long long)(flags))
-#define __sanitizer_syscall_post_statvfs1(res, path, buf, flags)               \
-  __sanitizer_syscall_post_impl_statvfs1(res, (long long)(path),               \
-                                         (long long)(buf), (long long)(flags))
-#define __sanitizer_syscall_pre_fstatvfs1(fd, buf, flags)                      \
-  __sanitizer_syscall_pre_impl_fstatvfs1((long long)(fd), (long long)(buf),    \
-                                         (long long)(flags))
-#define __sanitizer_syscall_post_fstatvfs1(res, fd, buf, flags)                \
-  __sanitizer_syscall_post_impl_fstatvfs1(                                     \
+#define __sanitizer_syscall_pre_compat_90_statvfs1(path, buf, flags)           \
+  __sanitizer_syscall_pre_impl_compat_90_statvfs1(                             \
+      (long long)(path), (long long)(buf), (long long)(flags))
+#define __sanitizer_syscall_post_compat_90_statvfs1(res, path, buf, flags)     \
+  __sanitizer_syscall_post_impl_compat_90_statvfs1(                            \
+      res, (long long)(path), (long long)(buf), (long long)(flags))
+#define __sanitizer_syscall_pre_compat_90_fstatvfs1(fd, buf, flags)            \
+  __sanitizer_syscall_pre_impl_compat_90_fstatvfs1(                            \
+      (long long)(fd), (long long)(buf), (long long)(flags))
+#define __sanitizer_syscall_post_compat_90_fstatvfs1(res, fd, buf, flags)      \
+  __sanitizer_syscall_post_impl_compat_90_fstatvfs1(                           \
       res, (long long)(fd), (long long)(buf), (long long)(flags))
 #define __sanitizer_syscall_pre_compat_30_fhstatvfs1(fhp, buf, flags)          \
   __sanitizer_syscall_pre_impl_compat_30_fhstatvfs1(                           \
@@ -2143,12 +2144,13 @@
 #define __sanitizer_syscall_post___fhopen40(res, fhp, fh_size, flags)          \
   __sanitizer_syscall_post_impl___fhopen40(                                    \
       res, (long long)(fhp), (long long)(fh_size), (long long)(flags))
-#define __sanitizer_syscall_pre___fhstatvfs140(fhp, fh_size, buf, flags)       \
-  __sanitizer_syscall_pre_impl___fhstatvfs140(                                 \
+#define __sanitizer_syscall_pre_compat_90_fhstatvfs1(fhp, fh_size, buf, flags) \
+  __sanitizer_syscall_pre_impl_compat_90_fhstatvfs1(                           \
       (long long)(fhp), (long long)(fh_size), (long long)(buf),                \
       (long long)(flags))
-#define __sanitizer_syscall_post___fhstatvfs140(res, fhp, fh_size, buf, flags) \
-  __sanitizer_syscall_post_impl___fhstatvfs140(                                \
+#define __sanitizer_syscall_post_compat_90_fhstatvfs1(res, fhp, fh_size, buf,  \
+                                                      flags)                   \
+  __sanitizer_syscall_post_impl_compat_90_fhstatvfs1(                          \
       res, (long long)(fhp), (long long)(fh_size), (long long)(buf),           \
       (long long)(flags))
 #define __sanitizer_syscall_pre_compat_50___fhstat40(fhp, fh_size, sb)         \
@@ -2703,6 +2705,53 @@
                                                       clock_id)                \
   __sanitizer_syscall_post_impl_clock_getcpuclockid2(                          \
       res, (long long)(idtype), (long long)(id), (long long)(clock_id))
+#define __sanitizer_syscall_pre___getvfsstat90(buf, bufsize, flags)            \
+  __sanitizer_syscall_pre_impl___getvfsstat90(                                 \
+      (long long)(buf), (long long)(bufsize), (long long)(flags))
+#define __sanitizer_syscall_post___getvfsstat90(res, buf, bufsize, flags)      \
+  __sanitizer_syscall_post_impl___getvfsstat90(                                \
+      res, (long long)(buf), (long long)(bufsize), (long long)(flags))
+#define __sanitizer_syscall_pre___statvfs190(path, buf, flags)                 \
+  __sanitizer_syscall_pre_impl___statvfs190(                                   \
+      (long long)(path), (long long)(buf), (long long)(flags))
+#define __sanitizer_syscall_post___statvfs190(res, path, buf, flags)           \
+  __sanitizer_syscall_post_impl___statvfs190(                                  \
+      res, (long long)(path), (long long)(buf), (long long)(flags))
+#define __sanitizer_syscall_pre___fstatvfs190(fd, buf, flags)                  \
+  __sanitizer_syscall_pre_impl___fstatvfs190(                                  \
+      (long long)(fd), (long long)(buf), (long long)(flags))
+#define __sanitizer_syscall_post___fstatvfs190(res, fd, buf, flags)            \
+  __sanitizer_syscall_post_impl___fstatvfs190(                                 \
+      res, (long long)(fd), (long long)(buf), (long long)(flags))
+#define __sanitizer_syscall_pre___fhstatvfs190(fhp, fh_size, buf, flags)       \
+  __sanitizer_syscall_pre_impl___fhstatvfs190(                                 \
+      (long long)(fhp), (long long)(fh_size), (long long)(buf),                \
+      (long long)(flags))
+#define __sanitizer_syscall_post___fhstatvfs190(res, fhp, fh_size, buf, flags) \
+  __sanitizer_syscall_post_impl___fhstatvfs190(                                \
+      res, (long long)(fhp), (long long)(fh_size), (long long)(buf),           \
+      (long long)(flags))
+
+/* Compat with older releases */
+#define __sanitizer_syscall_pre_getvfsstat                                     \
+  __sanitizer_syscall_pre_compat_90_getvfsstat
+#define __sanitizer_syscall_post_getvfsstat                                    \
+  __sanitizer_syscall_post_compat_90_getvfsstat
+
+#define __sanitizer_syscall_pre_statvfs1                                       \
+  __sanitizer_syscall_pre_compat_90_statvfs1
+#define __sanitizer_syscall_post_statvfs1                                      \
+  __sanitizer_syscall_post_compat_90_statvfs1
+
+#define __sanitizer_syscall_pre_fstatvfs1                                      \
+  __sanitizer_syscall_pre_compat_90_fstatvfs1
+#define __sanitizer_syscall_post_fstatvfs1                                     \
+  __sanitizer_syscall_post_compat_90_fstatvfs1
+
+#define __sanitizer_syscall_pre___fhstatvfs140                                 \
+  __sanitizer_syscall_pre_compat_90_fhstatvfs1
+#define __sanitizer_syscall_post___fhstatvfs140                                \
+  __sanitizer_syscall_post_compat_90_fhstatvfs1
 
 #ifdef __cplusplus
 extern "C" {
@@ -4066,19 +4115,27 @@
 void __sanitizer_syscall_pre_impl_uuidgen(long long store, long long count);
 void __sanitizer_syscall_post_impl_uuidgen(long long res, long long store,
                                            long long count);
-void __sanitizer_syscall_pre_impl_getvfsstat(long long buf, long long bufsize,
-                                             long long flags);
-void __sanitizer_syscall_post_impl_getvfsstat(long long res, long long buf,
-                                              long long bufsize,
-                                              long long flags);
-void __sanitizer_syscall_pre_impl_statvfs1(long long path, long long buf,
-                                           long long flags);
-void __sanitizer_syscall_post_impl_statvfs1(long long res, long long path,
-                                            long long buf, long long flags);
-void __sanitizer_syscall_pre_impl_fstatvfs1(long long fd, long long buf,
-                                            long long flags);
-void __sanitizer_syscall_post_impl_fstatvfs1(long long res, long long fd,
-                                             long long buf, long long flags);
+void __sanitizer_syscall_pre_impl_compat_90_getvfsstat(long long buf,
+                                                       long long bufsize,
+                                                       long long flags);
+void __sanitizer_syscall_post_impl_compat_90_getvfsstat(long long res,
+                                                        long long buf,
+                                                        long long bufsize,
+                                                        long long flags);
+void __sanitizer_syscall_pre_impl_compat_90_statvfs1(long long path,
+                                                     long long buf,
+                                                     long long flags);
+void __sanitizer_syscall_post_impl_compat_90_statvfs1(long long res,
+                                                      long long path,
+                                                      long long buf,
+                                                      long long flags);
+void __sanitizer_syscall_pre_impl_compat_90_fstatvfs1(long long fd,
+                                                      long long buf,
+                                                      long long flags);
+void __sanitizer_syscall_post_impl_compat_90_fstatvfs1(long long res,
+                                                       long long fd,
+                                                       long long buf,
+                                                       long long flags);
 void __sanitizer_syscall_pre_impl_compat_30_fhstatvfs1(long long fhp,
                                                        long long buf,
                                                        long long flags);
@@ -4304,14 +4361,15 @@
 void __sanitizer_syscall_post_impl___fhopen40(long long res, long long fhp,
                                               long long fh_size,
                                               long long flags);
-void __sanitizer_syscall_pre_impl___fhstatvfs140(long long fhp,
-                                                 long long fh_size,
-                                                 long long buf,
-                                                 long long flags);
-void __sanitizer_syscall_post_impl___fhstatvfs140(long long res, long long fhp,
-                                                  long long fh_size,
-                                                  long long buf,
-                                                  long long flags);
+void __sanitizer_syscall_pre_impl_compat_90_fhstatvfs1(long long fhp,
+                                                       long long fh_size,
+                                                       long long buf,
+                                                       long long flags);
+void __sanitizer_syscall_post_impl_compat_90_fhstatvfs1(long long res,
+                                                        long long fhp,
+                                                        long long fh_size,
+                                                        long long buf,
+                                                        long long flags);
 void __sanitizer_syscall_pre_impl_compat_50___fhstat40(long long fhp,
                                                        long long fh_size,
                                                        long long sb);
@@ -4721,6 +4779,29 @@
                                                         long long idtype,
                                                         long long id,
                                                         long long clock_id);
+void __sanitizer_syscall_pre_impl___getvfsstat90(long long buf,
+                                                 long long bufsize,
+                                                 long long flags);
+void __sanitizer_syscall_post_impl___getvfsstat90(long long res, long long buf,
+                                                  long long bufsize,
+                                                  long long flags);
+void __sanitizer_syscall_pre_impl___statvfs190(long long path, long long buf,
+                                               long long flags);
+void __sanitizer_syscall_post_impl___statvfs190(long long res, long long path,
+                                                long long buf, long long flags);
+void __sanitizer_syscall_pre_impl___fstatvfs190(long long fd, long long buf,
+                                                long long flags);
+void __sanitizer_syscall_post_impl___fstatvfs190(long long res, long long fd,
+                                                 long long buf,
+                                                 long long flags);
+void __sanitizer_syscall_pre_impl___fhstatvfs190(long long fhp,
+                                                 long long fh_size,
+                                                 long long buf,
+                                                 long long flags);
+void __sanitizer_syscall_post_impl___fhstatvfs190(long long res, long long fhp,
+                                                  long long fh_size,
+                                                  long long buf,
+                                                  long long flags);
 
 #ifdef __cplusplus
 } // extern "C"
diff --git a/darwin-x86/lib64/clang/10.0.1/include/sanitizer/scudo_interface.h b/darwin-x86/lib64/clang/10.0.5/include/sanitizer/scudo_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/sanitizer/scudo_interface.h
rename to darwin-x86/lib64/clang/10.0.5/include/sanitizer/scudo_interface.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/sanitizer/tsan_interface.h b/darwin-x86/lib64/clang/10.0.5/include/sanitizer/tsan_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/sanitizer/tsan_interface.h
rename to darwin-x86/lib64/clang/10.0.5/include/sanitizer/tsan_interface.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/sanitizer/tsan_interface_atomic.h b/darwin-x86/lib64/clang/10.0.5/include/sanitizer/tsan_interface_atomic.h
similarity index 98%
rename from darwin-x86/lib64/clang/10.0.1/include/sanitizer/tsan_interface_atomic.h
rename to darwin-x86/lib64/clang/10.0.5/include/sanitizer/tsan_interface_atomic.h
index 9ce0411..8052bc1 100644
--- a/darwin-x86/lib64/clang/10.0.1/include/sanitizer/tsan_interface_atomic.h
+++ b/darwin-x86/lib64/clang/10.0.5/include/sanitizer/tsan_interface_atomic.h
@@ -17,10 +17,10 @@
 extern "C" {
 #endif
 
-typedef char     __tsan_atomic8;
-typedef short    __tsan_atomic16;  // NOLINT
-typedef int      __tsan_atomic32;
-typedef long     __tsan_atomic64;  // NOLINT
+typedef char __tsan_atomic8;
+typedef short __tsan_atomic16;
+typedef int __tsan_atomic32;
+typedef long __tsan_atomic64;
 #if defined(__SIZEOF_INT128__) \
     || (__clang_major__ * 100 + __clang_minor__ >= 302)
 __extension__ typedef __int128 __tsan_atomic128;
diff --git a/darwin-x86/lib64/clang/10.0.5/include/sanitizer/ubsan_interface.h b/darwin-x86/lib64/clang/10.0.5/include/sanitizer/ubsan_interface.h
new file mode 100644
index 0000000..59fc6c3
--- /dev/null
+++ b/darwin-x86/lib64/clang/10.0.5/include/sanitizer/ubsan_interface.h
@@ -0,0 +1,32 @@
+//===-- sanitizer/ubsan_interface.h -----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of UBSanitizer (UBSan).
+//
+// Public interface header.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_UBSAN_INTERFACE_H
+#define SANITIZER_UBSAN_INTERFACE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/// User-provided default option settings.
+///
+/// You can provide your own implementation of this function to return a string
+/// containing UBSan runtime options (for example,
+/// <c>verbosity=1:halt_on_error=0</c>).
+///
+/// \returns Default options string.
+const char* __ubsan_default_options(void);
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // SANITIZER_UBSAN_INTERFACE_H
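
The new header documents a single override point. A minimal sketch of providing it from application code (assumes a -fsanitize=undefined build; the option string is the example given in the header comment):

    // Defining this symbol in the program supplies default UBSan runtime options.
    extern "C" const char *__ubsan_default_options(void) {
      return "verbosity=1:halt_on_error=0";
    }
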
diff --git a/darwin-x86/lib64/clang/10.0.1/include/sgxintrin.h b/darwin-x86/lib64/clang/10.0.5/include/sgxintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/sgxintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/sgxintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/shaintrin.h b/darwin-x86/lib64/clang/10.0.5/include/shaintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/shaintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/shaintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/smmintrin.h b/darwin-x86/lib64/clang/10.0.5/include/smmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/smmintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/smmintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/stdalign.h b/darwin-x86/lib64/clang/10.0.5/include/stdalign.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/stdalign.h
rename to darwin-x86/lib64/clang/10.0.5/include/stdalign.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/stdarg.h b/darwin-x86/lib64/clang/10.0.5/include/stdarg.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/stdarg.h
rename to darwin-x86/lib64/clang/10.0.5/include/stdarg.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/stdatomic.h b/darwin-x86/lib64/clang/10.0.5/include/stdatomic.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/stdatomic.h
rename to darwin-x86/lib64/clang/10.0.5/include/stdatomic.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/stdbool.h b/darwin-x86/lib64/clang/10.0.5/include/stdbool.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/stdbool.h
rename to darwin-x86/lib64/clang/10.0.5/include/stdbool.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/stddef.h b/darwin-x86/lib64/clang/10.0.5/include/stddef.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/stddef.h
rename to darwin-x86/lib64/clang/10.0.5/include/stddef.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/stdint.h b/darwin-x86/lib64/clang/10.0.5/include/stdint.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/stdint.h
rename to darwin-x86/lib64/clang/10.0.5/include/stdint.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/stdnoreturn.h b/darwin-x86/lib64/clang/10.0.5/include/stdnoreturn.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/stdnoreturn.h
rename to darwin-x86/lib64/clang/10.0.5/include/stdnoreturn.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/tbmintrin.h b/darwin-x86/lib64/clang/10.0.5/include/tbmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/tbmintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/tbmintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/tgmath.h b/darwin-x86/lib64/clang/10.0.5/include/tgmath.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/tgmath.h
rename to darwin-x86/lib64/clang/10.0.5/include/tgmath.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/tmmintrin.h b/darwin-x86/lib64/clang/10.0.5/include/tmmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/tmmintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/tmmintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/unwind.h b/darwin-x86/lib64/clang/10.0.5/include/unwind.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/unwind.h
rename to darwin-x86/lib64/clang/10.0.5/include/unwind.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/vadefs.h b/darwin-x86/lib64/clang/10.0.5/include/vadefs.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/vadefs.h
rename to darwin-x86/lib64/clang/10.0.5/include/vadefs.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/vaesintrin.h b/darwin-x86/lib64/clang/10.0.5/include/vaesintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/vaesintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/vaesintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/varargs.h b/darwin-x86/lib64/clang/10.0.5/include/varargs.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/varargs.h
rename to darwin-x86/lib64/clang/10.0.5/include/varargs.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/vecintrin.h b/darwin-x86/lib64/clang/10.0.5/include/vecintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/vecintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/vecintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/vpclmulqdqintrin.h b/darwin-x86/lib64/clang/10.0.5/include/vpclmulqdqintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/vpclmulqdqintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/vpclmulqdqintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/waitpkgintrin.h b/darwin-x86/lib64/clang/10.0.5/include/waitpkgintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/waitpkgintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/waitpkgintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/wbnoinvdintrin.h b/darwin-x86/lib64/clang/10.0.5/include/wbnoinvdintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/wbnoinvdintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/wbnoinvdintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/wmmintrin.h b/darwin-x86/lib64/clang/10.0.5/include/wmmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/wmmintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/wmmintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/x86intrin.h b/darwin-x86/lib64/clang/10.0.5/include/x86intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/x86intrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/x86intrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/xmmintrin.h b/darwin-x86/lib64/clang/10.0.5/include/xmmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/xmmintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/xmmintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/xopintrin.h b/darwin-x86/lib64/clang/10.0.5/include/xopintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/xopintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/xopintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/xsavecintrin.h b/darwin-x86/lib64/clang/10.0.5/include/xsavecintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/xsavecintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/xsavecintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/xsaveintrin.h b/darwin-x86/lib64/clang/10.0.5/include/xsaveintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/xsaveintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/xsaveintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/xsaveoptintrin.h b/darwin-x86/lib64/clang/10.0.5/include/xsaveoptintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/xsaveoptintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/xsaveoptintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/xsavesintrin.h b/darwin-x86/lib64/clang/10.0.5/include/xsavesintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/xsavesintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/xsavesintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/xtestintrin.h b/darwin-x86/lib64/clang/10.0.5/include/xtestintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/include/xtestintrin.h
rename to darwin-x86/lib64/clang/10.0.5/include/xtestintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/share/asan_blacklist.txt b/darwin-x86/lib64/clang/10.0.5/share/asan_blacklist.txt
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/share/asan_blacklist.txt
rename to darwin-x86/lib64/clang/10.0.5/share/asan_blacklist.txt
diff --git a/darwin-x86/lib64/clang/10.0.1/share/cfi_blacklist.txt b/darwin-x86/lib64/clang/10.0.5/share/cfi_blacklist.txt
similarity index 100%
rename from darwin-x86/lib64/clang/10.0.1/share/cfi_blacklist.txt
rename to darwin-x86/lib64/clang/10.0.5/share/cfi_blacklist.txt
diff --git a/darwin-x86/lib64/libLLVM.dylib b/darwin-x86/lib64/libLLVM.dylib
index 48bedb3..5991dcb 100755
--- a/darwin-x86/lib64/libLLVM.dylib
+++ b/darwin-x86/lib64/libLLVM.dylib
Binary files differ
diff --git a/darwin-x86/lib64/libbase.dylib b/darwin-x86/lib64/libbase.dylib
index c840720..72ac180 100755
--- a/darwin-x86/lib64/libbase.dylib
+++ b/darwin-x86/lib64/libbase.dylib
Binary files differ
diff --git a/darwin-x86/lib64/libc++.1.dylib b/darwin-x86/lib64/libc++.1.dylib
index c2540d0..df77e9e 100755
--- a/darwin-x86/lib64/libc++.1.dylib
+++ b/darwin-x86/lib64/libc++.1.dylib
Binary files differ
diff --git a/darwin-x86/lib64/libc++.dylib b/darwin-x86/lib64/libc++.dylib
index bb885dd..aa5edcd 100755
--- a/darwin-x86/lib64/libc++.dylib
+++ b/darwin-x86/lib64/libc++.dylib
Binary files differ
diff --git a/darwin-x86/lib64/libc++abi.1.dylib b/darwin-x86/lib64/libc++abi.1.dylib
index 67a86fe..569fa9c 100755
--- a/darwin-x86/lib64/libc++abi.1.dylib
+++ b/darwin-x86/lib64/libc++abi.1.dylib
Binary files differ
diff --git a/darwin-x86/lib64/libclang_cxx.dylib b/darwin-x86/lib64/libclang_cxx.dylib
index a7842a4..3098dba 100755
--- a/darwin-x86/lib64/libclang_cxx.dylib
+++ b/darwin-x86/lib64/libclang_cxx.dylib
Binary files differ
diff --git a/darwin-x86/lib64/liblog.dylib b/darwin-x86/lib64/liblog.dylib
index 62815e9..76355e4 100755
--- a/darwin-x86/lib64/liblog.dylib
+++ b/darwin-x86/lib64/liblog.dylib
Binary files differ
diff --git a/darwin-x86/lib64/libprotobuf-cpp-full.dylib b/darwin-x86/lib64/libprotobuf-cpp-full.dylib
index 7586cc2..a3cd14c 100755
--- a/darwin-x86/lib64/libprotobuf-cpp-full.dylib
+++ b/darwin-x86/lib64/libprotobuf-cpp-full.dylib
Binary files differ
diff --git a/darwin-x86/lib64/libz-host.dylib b/darwin-x86/lib64/libz-host.dylib
index 60422c8..114c143 100755
--- a/darwin-x86/lib64/libz-host.dylib
+++ b/darwin-x86/lib64/libz-host.dylib
Binary files differ
diff --git a/darwin-x86/lib64/libziparchive.dylib b/darwin-x86/lib64/libziparchive.dylib
index 54504b4..220d37a 100755
--- a/darwin-x86/lib64/libziparchive.dylib
+++ b/darwin-x86/lib64/libziparchive.dylib
Binary files differ
diff --git a/linux-x86/bin/cxx_extractor b/linux-x86/bin/cxx_extractor
index c1cd972..dcab35a 100755
--- a/linux-x86/bin/cxx_extractor
+++ b/linux-x86/bin/cxx_extractor
Binary files differ
diff --git a/linux-x86/bin/header-abi-diff b/linux-x86/bin/header-abi-diff
index 3a9c4c2..f089e83 100755
--- a/linux-x86/bin/header-abi-diff
+++ b/linux-x86/bin/header-abi-diff
Binary files differ
diff --git a/linux-x86/bin/header-abi-dumper b/linux-x86/bin/header-abi-dumper
index 00165eb..6b58e4a 100755
--- a/linux-x86/bin/header-abi-dumper
+++ b/linux-x86/bin/header-abi-dumper
Binary files differ
diff --git a/linux-x86/bin/header-abi-linker b/linux-x86/bin/header-abi-linker
index d38fa8c..a0ddb5b 100755
--- a/linux-x86/bin/header-abi-linker
+++ b/linux-x86/bin/header-abi-linker
Binary files differ
diff --git a/linux-x86/bin/merge-abi-diff b/linux-x86/bin/merge-abi-diff
index fc26012..d459edd 100755
--- a/linux-x86/bin/merge-abi-diff
+++ b/linux-x86/bin/merge-abi-diff
Binary files differ
diff --git a/linux-x86/bin/proto_metadata_plugin b/linux-x86/bin/proto_metadata_plugin
index 36964b0..8574d40 100755
--- a/linux-x86/bin/proto_metadata_plugin
+++ b/linux-x86/bin/proto_metadata_plugin
Binary files differ
diff --git a/linux-x86/bin/protoc_extractor b/linux-x86/bin/protoc_extractor
index 823cfc8..012af54 100755
--- a/linux-x86/bin/protoc_extractor
+++ b/linux-x86/bin/protoc_extractor
Binary files differ
diff --git a/linux-x86/bin/versioner b/linux-x86/bin/versioner
index aa9011c..48d7a1a 100755
--- a/linux-x86/bin/versioner
+++ b/linux-x86/bin/versioner
Binary files differ
diff --git a/linux-x86/clang-headers b/linux-x86/clang-headers
index 4009f74..36c43f3 120000
--- a/linux-x86/clang-headers
+++ b/linux-x86/clang-headers
@@ -1 +1 @@
-lib64/clang/10.0.1/include
\ No newline at end of file
+lib64/clang/10.0.5/include
\ No newline at end of file
diff --git a/linux-x86/lib64/clang/10.0.1/include/altivec.h b/linux-x86/lib64/clang/10.0.1/include/altivec.h
deleted file mode 100644
index 4008440..0000000
--- a/linux-x86/lib64/clang/10.0.1/include/altivec.h
+++ /dev/null
@@ -1,16739 +0,0 @@
-/*===---- altivec.h - Standard header for type generic math ---------------===*\
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
-\*===----------------------------------------------------------------------===*/
-
-#ifndef __ALTIVEC_H
-#define __ALTIVEC_H
-
-#ifndef __ALTIVEC__
-#error "AltiVec support not enabled"
-#endif
-
-/* Constants for mapping CR6 bits to predicate result. */
-
-#define __CR6_EQ 0
-#define __CR6_EQ_REV 1
-#define __CR6_LT 2
-#define __CR6_LT_REV 3
-
-/* Constants for vec_test_data_class */
-#define __VEC_CLASS_FP_SUBNORMAL_N (1 << 0)
-#define __VEC_CLASS_FP_SUBNORMAL_P (1 << 1)
-#define __VEC_CLASS_FP_SUBNORMAL (__VEC_CLASS_FP_SUBNORMAL_P | \
-                                  __VEC_CLASS_FP_SUBNORMAL_N)
-#define __VEC_CLASS_FP_ZERO_N (1<<2)
-#define __VEC_CLASS_FP_ZERO_P (1<<3)
-#define __VEC_CLASS_FP_ZERO (__VEC_CLASS_FP_ZERO_P           | \
-                             __VEC_CLASS_FP_ZERO_N)
-#define __VEC_CLASS_FP_INFINITY_N (1<<4)
-#define __VEC_CLASS_FP_INFINITY_P (1<<5)
-#define __VEC_CLASS_FP_INFINITY (__VEC_CLASS_FP_INFINITY_P   | \
-                                 __VEC_CLASS_FP_INFINITY_N)
-#define __VEC_CLASS_FP_NAN (1<<6)
-#define __VEC_CLASS_FP_NOT_NORMAL (__VEC_CLASS_FP_NAN        | \
-                                   __VEC_CLASS_FP_SUBNORMAL  | \
-                                   __VEC_CLASS_FP_ZERO       | \
-                                   __VEC_CLASS_FP_INFINITY)
-
-#define __ATTRS_o_ai __attribute__((__overloadable__, __always_inline__))
-
-#ifdef __POWER9_VECTOR__
-#include <stddef.h>
-#endif
-
-static __inline__ vector signed char __ATTRS_o_ai vec_perm(
-    vector signed char __a, vector signed char __b, vector unsigned char __c);
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_perm(vector unsigned char __a, vector unsigned char __b,
-         vector unsigned char __c);
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_perm(vector bool char __a, vector bool char __b, vector unsigned char __c);
-
-static __inline__ vector short __ATTRS_o_ai vec_perm(vector signed short __a,
-                                                     vector signed short __b,
-                                                     vector unsigned char __c);
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_perm(vector unsigned short __a, vector unsigned short __b,
-         vector unsigned char __c);
-
-static __inline__ vector bool short __ATTRS_o_ai vec_perm(
-    vector bool short __a, vector bool short __b, vector unsigned char __c);
-
-static __inline__ vector pixel __ATTRS_o_ai vec_perm(vector pixel __a,
-                                                     vector pixel __b,
-                                                     vector unsigned char __c);
-
-static __inline__ vector int __ATTRS_o_ai vec_perm(vector signed int __a,
-                                                   vector signed int __b,
-                                                   vector unsigned char __c);
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_perm(
-    vector unsigned int __a, vector unsigned int __b, vector unsigned char __c);
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_perm(vector bool int __a, vector bool int __b, vector unsigned char __c);
-
-static __inline__ vector float __ATTRS_o_ai vec_perm(vector float __a,
-                                                     vector float __b,
-                                                     vector unsigned char __c);
-
-#ifdef __VSX__
-static __inline__ vector long long __ATTRS_o_ai
-vec_perm(vector signed long long __a, vector signed long long __b,
-         vector unsigned char __c);
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_perm(vector unsigned long long __a, vector unsigned long long __b,
-         vector unsigned char __c);
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_perm(vector bool long long __a, vector bool long long __b,
-         vector unsigned char __c);
-
-static __inline__ vector double __ATTRS_o_ai vec_perm(vector double __a,
-                                                      vector double __b,
-                                                      vector unsigned char __c);
-#endif
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xor(vector unsigned char __a, vector unsigned char __b);
-
-/* vec_abs */
-
-#define __builtin_altivec_abs_v16qi vec_abs
-#define __builtin_altivec_abs_v8hi vec_abs
-#define __builtin_altivec_abs_v4si vec_abs
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_abs(vector signed char __a) {
-  return __builtin_altivec_vmaxsb(__a, -__a);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_abs(vector signed short __a) {
-  return __builtin_altivec_vmaxsh(__a, -__a);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_abs(vector signed int __a) {
-  return __builtin_altivec_vmaxsw(__a, -__a);
-}
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_abs(vector signed long long __a) {
-  return __builtin_altivec_vmaxsd(__a, -__a);
-}
-#endif
-
-static __inline__ vector float __ATTRS_o_ai vec_abs(vector float __a) {
-#ifdef __VSX__
-  return __builtin_vsx_xvabssp(__a);
-#else
-  vector unsigned int __res =
-      (vector unsigned int)__a & (vector unsigned int)(0x7FFFFFFF);
-  return (vector float)__res;
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_abs(vector double __a) {
-  return __builtin_vsx_xvabsdp(__a);
-}
-#endif
-
-/* vec_abss */
-#define __builtin_altivec_abss_v16qi vec_abss
-#define __builtin_altivec_abss_v8hi vec_abss
-#define __builtin_altivec_abss_v4si vec_abss
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_abss(vector signed char __a) {
-  return __builtin_altivec_vmaxsb(
-      __a, __builtin_altivec_vsubsbs((vector signed char)(0), __a));
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_abss(vector signed short __a) {
-  return __builtin_altivec_vmaxsh(
-      __a, __builtin_altivec_vsubshs((vector signed short)(0), __a));
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_abss(vector signed int __a) {
-  return __builtin_altivec_vmaxsw(
-      __a, __builtin_altivec_vsubsws((vector signed int)(0), __a));
-}
-
-/* vec_absd */
-#if defined(__POWER9_VECTOR__)
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_absd(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vabsdub(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_absd(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vabsduh(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_absd(vector unsigned int __a,  vector unsigned int __b) {
-  return __builtin_altivec_vabsduw(__a, __b);
-}
-
-#endif /* End __POWER9_VECTOR__ */
-
-/* vec_add */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_add(vector signed char __a, vector signed char __b) {
-  return __a + __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_add(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a + __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_add(vector signed char __a, vector bool char __b) {
-  return __a + (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_add(vector unsigned char __a, vector unsigned char __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_add(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a + __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_add(vector unsigned char __a, vector bool char __b) {
-  return __a + (vector unsigned char)__b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_add(vector short __a,
-                                                    vector short __b) {
-  return __a + __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_add(vector bool short __a,
-                                                    vector short __b) {
-  return (vector short)__a + __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_add(vector short __a,
-                                                    vector bool short __b) {
-  return __a + (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_add(vector unsigned short __a, vector unsigned short __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_add(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a + __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_add(vector unsigned short __a, vector bool short __b) {
-  return __a + (vector unsigned short)__b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_add(vector int __a,
-                                                  vector int __b) {
-  return __a + __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_add(vector bool int __a,
-                                                  vector int __b) {
-  return (vector int)__a + __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_add(vector int __a,
-                                                  vector bool int __b) {
-  return __a + (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_add(vector unsigned int __a, vector unsigned int __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_add(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a + __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_add(vector unsigned int __a, vector bool int __b) {
-  return __a + (vector unsigned int)__b;
-}
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_add(vector signed long long __a, vector signed long long __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_add(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a + __b;
-}
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_add(vector signed __int128 __a, vector signed __int128 __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_add(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __a + __b;
-}
-#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-
-static __inline__ vector float __ATTRS_o_ai vec_add(vector float __a,
-                                                    vector float __b) {
-  return __a + __b;
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_add(vector double __a,
-                                                     vector double __b) {
-  return __a + __b;
-}
-#endif // __VSX__
-
-/* vec_adde */
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_adde(vector signed __int128 __a, vector signed __int128 __b,
-         vector signed __int128 __c) {
-  return __builtin_altivec_vaddeuqm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_adde(vector unsigned __int128 __a, vector unsigned __int128 __b,
-         vector unsigned __int128 __c) {
-  return __builtin_altivec_vaddeuqm(__a, __b, __c);
-}
-#endif
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_adde(vector signed int __a, vector signed int __b,
-         vector signed int __c) {
-  vector signed int __mask = {1, 1, 1, 1};
-  vector signed int __carry = __c & __mask;
-  return vec_add(vec_add(__a, __b), __carry);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_adde(vector unsigned int __a, vector unsigned int __b,
-         vector unsigned int __c) {
-  vector unsigned int __mask = {1, 1, 1, 1};
-  vector unsigned int __carry = __c & __mask;
-  return vec_add(vec_add(__a, __b), __carry);
-}
-
-/* vec_addec */
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_addec(vector signed __int128 __a, vector signed __int128 __b,
-          vector signed __int128 __c) {
-  return __builtin_altivec_vaddecuq(__a, __b, __c);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_addec(vector unsigned __int128 __a, vector unsigned __int128 __b,
-          vector unsigned __int128 __c) {
-  return __builtin_altivec_vaddecuq(__a, __b, __c);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_addec(vector signed int __a, vector signed int __b,
-          vector signed int __c) {
-
-  signed int __result[4];
-  for (int i = 0; i < 4; i++) {
-    unsigned int __tempa = (unsigned int) __a[i];
-    unsigned int __tempb = (unsigned int) __b[i];
-    unsigned int __tempc = (unsigned int) __c[i];
-    __tempc = __tempc & 0x00000001;
-    unsigned long long __longa = (unsigned long long) __tempa;
-    unsigned long long __longb = (unsigned long long) __tempb;
-    unsigned long long __longc = (unsigned long long) __tempc;
-    unsigned long long __sum = __longa + __longb + __longc;
-    unsigned long long __res = (__sum >> 32) & 0x01;
-    unsigned long long __tempres = (unsigned int) __res;
-    __result[i] = (signed int) __tempres;
-  }
-
-  vector signed int ret = { __result[0], __result[1], __result[2], __result[3] };
-  return ret;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_addec(vector unsigned int __a, vector unsigned int __b,
-          vector unsigned int __c) {
-
-  unsigned int __result[4];
-  for (int i = 0; i < 4; i++) {
-    unsigned int __tempc = __c[i] & 1;
-    unsigned long long __longa = (unsigned long long) __a[i];
-    unsigned long long __longb = (unsigned long long) __b[i];
-    unsigned long long __longc = (unsigned long long) __tempc;
-    unsigned long long __sum = __longa + __longb + __longc;
-    unsigned long long __res = (__sum >> 32) & 0x01;
-    unsigned long long __tempres = (unsigned int) __res;
-    __result[i] = (signed int) __tempres;
-  }
-
-  vector unsigned int ret = { __result[0], __result[1], __result[2], __result[3] };
-  return ret;
-}
-
-#endif
-
-/* vec_vaddubm */
-
-#define __builtin_altivec_vaddubm vec_vaddubm
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vaddubm(vector signed char __a, vector signed char __b) {
-  return __a + __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vaddubm(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a + __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vaddubm(vector signed char __a, vector bool char __b) {
-  return __a + (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vaddubm(vector unsigned char __a, vector unsigned char __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vaddubm(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a + __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vaddubm(vector unsigned char __a, vector bool char __b) {
-  return __a + (vector unsigned char)__b;
-}
-
-/* vec_vadduhm */
-
-#define __builtin_altivec_vadduhm vec_vadduhm
-
-static __inline__ vector short __ATTRS_o_ai vec_vadduhm(vector short __a,
-                                                        vector short __b) {
-  return __a + __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vadduhm(vector bool short __a,
-                                                        vector short __b) {
-  return (vector short)__a + __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vadduhm(vector short __a,
-                                                        vector bool short __b) {
-  return __a + (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vadduhm(vector unsigned short __a, vector unsigned short __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vadduhm(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a + __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vadduhm(vector unsigned short __a, vector bool short __b) {
-  return __a + (vector unsigned short)__b;
-}
-
-/* vec_vadduwm */
-
-#define __builtin_altivec_vadduwm vec_vadduwm
-
-static __inline__ vector int __ATTRS_o_ai vec_vadduwm(vector int __a,
-                                                      vector int __b) {
-  return __a + __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vadduwm(vector bool int __a,
-                                                      vector int __b) {
-  return (vector int)__a + __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vadduwm(vector int __a,
-                                                      vector bool int __b) {
-  return __a + (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vadduwm(vector unsigned int __a, vector unsigned int __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vadduwm(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a + __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vadduwm(vector unsigned int __a, vector bool int __b) {
-  return __a + (vector unsigned int)__b;
-}
-
-/* vec_vaddfp */
-
-#define __builtin_altivec_vaddfp vec_vaddfp
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vaddfp(vector float __a, vector float __b) {
-  return __a + __b;
-}
-
-/* vec_addc */
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_addc(vector signed int __a, vector signed int __b) {
-  return (vector signed int)__builtin_altivec_vaddcuw((vector unsigned int)__a,
-                                                      (vector unsigned int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_addc(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vaddcuw(__a, __b);
-}
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_addc(vector signed __int128 __a, vector signed __int128 __b) {
-  return (vector signed __int128)__builtin_altivec_vaddcuq(
-      (vector unsigned __int128)__a, (vector unsigned __int128)__b);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_addc(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __builtin_altivec_vaddcuq(__a, __b);
-}
-#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-
-/* vec_vaddcuw */
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vaddcuw(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vaddcuw(__a, __b);
-}
-
-/* vec_adds */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_adds(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vaddsbs(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_adds(vector bool char __a, vector signed char __b) {
-  return __builtin_altivec_vaddsbs((vector signed char)__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_adds(vector signed char __a, vector bool char __b) {
-  return __builtin_altivec_vaddsbs(__a, (vector signed char)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_adds(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vaddubs(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_adds(vector bool char __a, vector unsigned char __b) {
-  return __builtin_altivec_vaddubs((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_adds(vector unsigned char __a, vector bool char __b) {
-  return __builtin_altivec_vaddubs(__a, (vector unsigned char)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_adds(vector short __a,
-                                                     vector short __b) {
-  return __builtin_altivec_vaddshs(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_adds(vector bool short __a,
-                                                     vector short __b) {
-  return __builtin_altivec_vaddshs((vector short)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_adds(vector short __a,
-                                                     vector bool short __b) {
-  return __builtin_altivec_vaddshs(__a, (vector short)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_adds(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vadduhs(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_adds(vector bool short __a, vector unsigned short __b) {
-  return __builtin_altivec_vadduhs((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_adds(vector unsigned short __a, vector bool short __b) {
-  return __builtin_altivec_vadduhs(__a, (vector unsigned short)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_adds(vector int __a,
-                                                   vector int __b) {
-  return __builtin_altivec_vaddsws(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_adds(vector bool int __a,
-                                                   vector int __b) {
-  return __builtin_altivec_vaddsws((vector int)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_adds(vector int __a,
-                                                   vector bool int __b) {
-  return __builtin_altivec_vaddsws(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_adds(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vadduws(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_adds(vector bool int __a, vector unsigned int __b) {
-  return __builtin_altivec_vadduws((vector unsigned int)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_adds(vector unsigned int __a, vector bool int __b) {
-  return __builtin_altivec_vadduws(__a, (vector unsigned int)__b);
-}
-
-/* vec_vaddsbs */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vaddsbs(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vaddsbs(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vaddsbs(vector bool char __a, vector signed char __b) {
-  return __builtin_altivec_vaddsbs((vector signed char)__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vaddsbs(vector signed char __a, vector bool char __b) {
-  return __builtin_altivec_vaddsbs(__a, (vector signed char)__b);
-}
-
-/* vec_vaddubs */
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vaddubs(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vaddubs(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vaddubs(vector bool char __a, vector unsigned char __b) {
-  return __builtin_altivec_vaddubs((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vaddubs(vector unsigned char __a, vector bool char __b) {
-  return __builtin_altivec_vaddubs(__a, (vector unsigned char)__b);
-}
-
-/* vec_vaddshs */
-
-static __inline__ vector short __ATTRS_o_ai vec_vaddshs(vector short __a,
-                                                        vector short __b) {
-  return __builtin_altivec_vaddshs(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vaddshs(vector bool short __a,
-                                                        vector short __b) {
-  return __builtin_altivec_vaddshs((vector short)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vaddshs(vector short __a,
-                                                        vector bool short __b) {
-  return __builtin_altivec_vaddshs(__a, (vector short)__b);
-}
-
-/* vec_vadduhs */
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vadduhs(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vadduhs(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vadduhs(vector bool short __a, vector unsigned short __b) {
-  return __builtin_altivec_vadduhs((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vadduhs(vector unsigned short __a, vector bool short __b) {
-  return __builtin_altivec_vadduhs(__a, (vector unsigned short)__b);
-}
-
-/* vec_vaddsws */
-
-static __inline__ vector int __ATTRS_o_ai vec_vaddsws(vector int __a,
-                                                      vector int __b) {
-  return __builtin_altivec_vaddsws(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vaddsws(vector bool int __a,
-                                                      vector int __b) {
-  return __builtin_altivec_vaddsws((vector int)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vaddsws(vector int __a,
-                                                      vector bool int __b) {
-  return __builtin_altivec_vaddsws(__a, (vector int)__b);
-}
-
-/* vec_vadduws */
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vadduws(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vadduws(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vadduws(vector bool int __a, vector unsigned int __b) {
-  return __builtin_altivec_vadduws((vector unsigned int)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vadduws(vector unsigned int __a, vector bool int __b) {
-  return __builtin_altivec_vadduws(__a, (vector unsigned int)__b);
-}
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-/* vec_vadduqm */
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_vadduqm(vector signed __int128 __a, vector signed __int128 __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_vadduqm(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __a + __b;
-}
-
-/* vec_vaddeuqm */
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_vaddeuqm(vector signed __int128 __a, vector signed __int128 __b,
-             vector signed __int128 __c) {
-  return __builtin_altivec_vaddeuqm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_vaddeuqm(vector unsigned __int128 __a, vector unsigned __int128 __b,
-             vector unsigned __int128 __c) {
-  return __builtin_altivec_vaddeuqm(__a, __b, __c);
-}
-
-/* vec_vaddcuq */
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_vaddcuq(vector signed __int128 __a, vector signed __int128 __b) {
-  return __builtin_altivec_vaddcuq(__a, __b);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_vaddcuq(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __builtin_altivec_vaddcuq(__a, __b);
-}
-
-/* vec_vaddecuq */
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_vaddecuq(vector signed __int128 __a, vector signed __int128 __b,
-             vector signed __int128 __c) {
-  return __builtin_altivec_vaddecuq(__a, __b, __c);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_vaddecuq(vector unsigned __int128 __a, vector unsigned __int128 __b,
-             vector unsigned __int128 __c) {
-  return __builtin_altivec_vaddecuq(__a, __b, __c);
-}
-#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-
-/* vec_and */
-
-#define __builtin_altivec_vand vec_and
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_and(vector signed char __a, vector signed char __b) {
-  return __a & __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_and(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a & __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_and(vector signed char __a, vector bool char __b) {
-  return __a & (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_and(vector unsigned char __a, vector unsigned char __b) {
-  return __a & __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_and(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a & __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_and(vector unsigned char __a, vector bool char __b) {
-  return __a & (vector unsigned char)__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_and(vector bool char __a,
-                                                        vector bool char __b) {
-  return __a & __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_and(vector short __a,
-                                                    vector short __b) {
-  return __a & __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_and(vector bool short __a,
-                                                    vector short __b) {
-  return (vector short)__a & __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_and(vector short __a,
-                                                    vector bool short __b) {
-  return __a & (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_and(vector unsigned short __a, vector unsigned short __b) {
-  return __a & __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_and(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a & __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_and(vector unsigned short __a, vector bool short __b) {
-  return __a & (vector unsigned short)__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_and(vector bool short __a, vector bool short __b) {
-  return __a & __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_and(vector int __a,
-                                                  vector int __b) {
-  return __a & __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_and(vector bool int __a,
-                                                  vector int __b) {
-  return (vector int)__a & __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_and(vector int __a,
-                                                  vector bool int __b) {
-  return __a & (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_and(vector unsigned int __a, vector unsigned int __b) {
-  return __a & __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_and(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a & __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_and(vector unsigned int __a, vector bool int __b) {
-  return __a & (vector unsigned int)__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_and(vector bool int __a,
-                                                       vector bool int __b) {
-  return __a & __b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_and(vector float __a,
-                                                    vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_and(vector bool int __a,
-                                                    vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_and(vector float __a,
-                                                    vector bool int __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_and(vector bool long long __a,
-                                                     vector double __b) {
-  vector unsigned long long __res =
-      (vector unsigned long long)__a & (vector unsigned long long)__b;
-  return (vector double)__res;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_and(vector double __a, vector bool long long __b) {
-  vector unsigned long long __res =
-      (vector unsigned long long)__a & (vector unsigned long long)__b;
-  return (vector double)__res;
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_and(vector double __a,
-                                                     vector double __b) {
-  vector unsigned long long __res =
-      (vector unsigned long long)__a & (vector unsigned long long)__b;
-  return (vector double)__res;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_and(vector signed long long __a, vector signed long long __b) {
-  return __a & __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_and(vector bool long long __a, vector signed long long __b) {
-  return (vector signed long long)__a & __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_and(vector signed long long __a, vector bool long long __b) {
-  return __a & (vector signed long long)__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_and(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a & __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_and(vector bool long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__a & __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_and(vector unsigned long long __a, vector bool long long __b) {
-  return __a & (vector unsigned long long)__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_and(vector bool long long __a, vector bool long long __b) {
-  return __a & __b;
-}
-#endif
-
-/* vec_vand */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vand(vector signed char __a, vector signed char __b) {
-  return __a & __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vand(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a & __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vand(vector signed char __a, vector bool char __b) {
-  return __a & (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vand(vector unsigned char __a, vector unsigned char __b) {
-  return __a & __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vand(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a & __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vand(vector unsigned char __a, vector bool char __b) {
-  return __a & (vector unsigned char)__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_vand(vector bool char __a,
-                                                         vector bool char __b) {
-  return __a & __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vand(vector short __a,
-                                                     vector short __b) {
-  return __a & __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vand(vector bool short __a,
-                                                     vector short __b) {
-  return (vector short)__a & __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vand(vector short __a,
-                                                     vector bool short __b) {
-  return __a & (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vand(vector unsigned short __a, vector unsigned short __b) {
-  return __a & __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vand(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a & __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vand(vector unsigned short __a, vector bool short __b) {
-  return __a & (vector unsigned short)__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vand(vector bool short __a, vector bool short __b) {
-  return __a & __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vand(vector int __a,
-                                                   vector int __b) {
-  return __a & __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vand(vector bool int __a,
-                                                   vector int __b) {
-  return (vector int)__a & __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vand(vector int __a,
-                                                   vector bool int __b) {
-  return __a & (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vand(vector unsigned int __a, vector unsigned int __b) {
-  return __a & __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vand(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a & __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vand(vector unsigned int __a, vector bool int __b) {
-  return __a & (vector unsigned int)__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vand(vector bool int __a,
-                                                        vector bool int __b) {
-  return __a & __b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vand(vector float __a,
-                                                     vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vand(vector bool int __a,
-                                                     vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vand(vector float __a,
-                                                     vector bool int __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vand(vector signed long long __a, vector signed long long __b) {
-  return __a & __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vand(vector bool long long __a, vector signed long long __b) {
-  return (vector signed long long)__a & __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vand(vector signed long long __a, vector bool long long __b) {
-  return __a & (vector signed long long)__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vand(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a & __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vand(vector bool long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__a & __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vand(vector unsigned long long __a, vector bool long long __b) {
-  return __a & (vector unsigned long long)__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_vand(vector bool long long __a, vector bool long long __b) {
-  return __a & __b;
-}
-#endif
-
-/* vec_andc */
-
-#define __builtin_altivec_vandc vec_andc
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_andc(vector signed char __a, vector signed char __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_andc(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a & ~__b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_andc(vector signed char __a, vector bool char __b) {
-  return __a & ~(vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_andc(vector unsigned char __a, vector unsigned char __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_andc(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a & ~__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_andc(vector unsigned char __a, vector bool char __b) {
-  return __a & ~(vector unsigned char)__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_andc(vector bool char __a,
-                                                         vector bool char __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_andc(vector short __a,
-                                                     vector short __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_andc(vector bool short __a,
-                                                     vector short __b) {
-  return (vector short)__a & ~__b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_andc(vector short __a,
-                                                     vector bool short __b) {
-  return __a & ~(vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_andc(vector unsigned short __a, vector unsigned short __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_andc(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a & ~__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_andc(vector unsigned short __a, vector bool short __b) {
-  return __a & ~(vector unsigned short)__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_andc(vector bool short __a, vector bool short __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_andc(vector int __a,
-                                                   vector int __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_andc(vector bool int __a,
-                                                   vector int __b) {
-  return (vector int)__a & ~__b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_andc(vector int __a,
-                                                   vector bool int __b) {
-  return __a & ~(vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_andc(vector unsigned int __a, vector unsigned int __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_andc(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a & ~__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_andc(vector unsigned int __a, vector bool int __b) {
-  return __a & ~(vector unsigned int)__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_andc(vector bool int __a,
-                                                        vector bool int __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_andc(vector float __a,
-                                                     vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & ~(vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_andc(vector bool int __a,
-                                                     vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & ~(vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_andc(vector float __a,
-                                                     vector bool int __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & ~(vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_andc(vector bool long long __a,
-                                                      vector double __b) {
-  vector unsigned long long __res =
-      (vector unsigned long long)__a & ~(vector unsigned long long)__b;
-  return (vector double)__res;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_andc(vector double __a, vector bool long long __b) {
-  vector unsigned long long __res =
-      (vector unsigned long long)__a & ~(vector unsigned long long)__b;
-  return (vector double)__res;
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_andc(vector double __a,
-                                                      vector double __b) {
-  vector unsigned long long __res =
-      (vector unsigned long long)__a & ~(vector unsigned long long)__b;
-  return (vector double)__res;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_andc(vector signed long long __a, vector signed long long __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_andc(vector bool long long __a, vector signed long long __b) {
-  return (vector signed long long)__a & ~__b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_andc(vector signed long long __a, vector bool long long __b) {
-  return __a & ~(vector signed long long)__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_andc(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_andc(vector bool long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__a & ~__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_andc(vector unsigned long long __a, vector bool long long __b) {
-  return __a & ~(vector unsigned long long)__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_andc(vector bool long long __a, vector bool long long __b) {
-  return __a & ~__b;
-}
-#endif
-
-/* vec_vandc */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vandc(vector signed char __a, vector signed char __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vandc(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a & ~__b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vandc(vector signed char __a, vector bool char __b) {
-  return __a & ~(vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vandc(vector unsigned char __a, vector unsigned char __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vandc(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a & ~__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vandc(vector unsigned char __a, vector bool char __b) {
-  return __a & ~(vector unsigned char)__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vandc(vector bool char __a, vector bool char __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vandc(vector short __a,
-                                                      vector short __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vandc(vector bool short __a,
-                                                      vector short __b) {
-  return (vector short)__a & ~__b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vandc(vector short __a,
-                                                      vector bool short __b) {
-  return __a & ~(vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vandc(vector unsigned short __a, vector unsigned short __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vandc(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a & ~__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vandc(vector unsigned short __a, vector bool short __b) {
-  return __a & ~(vector unsigned short)__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vandc(vector bool short __a, vector bool short __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vandc(vector int __a,
-                                                    vector int __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vandc(vector bool int __a,
-                                                    vector int __b) {
-  return (vector int)__a & ~__b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vandc(vector int __a,
-                                                    vector bool int __b) {
-  return __a & ~(vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vandc(vector unsigned int __a, vector unsigned int __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vandc(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a & ~__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vandc(vector unsigned int __a, vector bool int __b) {
-  return __a & ~(vector unsigned int)__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vandc(vector bool int __a,
-                                                         vector bool int __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vandc(vector float __a,
-                                                      vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & ~(vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vandc(vector bool int __a,
-                                                      vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & ~(vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vandc(vector float __a,
-                                                      vector bool int __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & ~(vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vandc(vector signed long long __a, vector signed long long __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vandc(vector bool long long __a, vector signed long long __b) {
-  return (vector signed long long)__a & ~__b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vandc(vector signed long long __a, vector bool long long __b) {
-  return __a & ~(vector signed long long)__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vandc(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vandc(vector bool long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__a & ~__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vandc(vector unsigned long long __a, vector bool long long __b) {
-  return __a & ~(vector unsigned long long)__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_vandc(vector bool long long __a, vector bool long long __b) {
-  return __a & ~__b;
-}
-#endif
-
-/* vec_avg */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_avg(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vavgsb(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_avg(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vavgub(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_avg(vector short __a,
-                                                    vector short __b) {
-  return __builtin_altivec_vavgsh(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_avg(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vavguh(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_avg(vector int __a,
-                                                  vector int __b) {
-  return __builtin_altivec_vavgsw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_avg(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vavguw(__a, __b);
-}
-
-/* vec_vavgsb */
-
-static __inline__ vector signed char __attribute__((__always_inline__))
-vec_vavgsb(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vavgsb(__a, __b);
-}
-
-/* vec_vavgub */
-
-static __inline__ vector unsigned char __attribute__((__always_inline__))
-vec_vavgub(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vavgub(__a, __b);
-}
-
-/* vec_vavgsh */
-
-static __inline__ vector short __attribute__((__always_inline__))
-vec_vavgsh(vector short __a, vector short __b) {
-  return __builtin_altivec_vavgsh(__a, __b);
-}
-
-/* vec_vavguh */
-
-static __inline__ vector unsigned short __attribute__((__always_inline__))
-vec_vavguh(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vavguh(__a, __b);
-}
-
-/* vec_vavgsw */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vavgsw(vector int __a, vector int __b) {
-  return __builtin_altivec_vavgsw(__a, __b);
-}
-
-/* vec_vavguw */
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vavguw(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vavguw(__a, __b);
-}
-
-/* vec_ceil */
-
-static __inline__ vector float __ATTRS_o_ai vec_ceil(vector float __a) {
-#ifdef __VSX__
-  return __builtin_vsx_xvrspip(__a);
-#else
-  return __builtin_altivec_vrfip(__a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_ceil(vector double __a) {
-  return __builtin_vsx_xvrdpip(__a);
-}
-#endif
-
-/* vec_vrfip */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vrfip(vector float __a) {
-  return __builtin_altivec_vrfip(__a);
-}
-
-/* vec_cmpb */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_cmpb(vector float __a, vector float __b) {
-  return __builtin_altivec_vcmpbfp(__a, __b);
-}
-
-/* vec_vcmpbfp */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vcmpbfp(vector float __a, vector float __b) {
-  return __builtin_altivec_vcmpbfp(__a, __b);
-}
-
-/* vec_cmpeq */
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpeq(vector signed char __a, vector signed char __b) {
-  return (vector bool char)__builtin_altivec_vcmpequb((vector char)__a,
-                                                      (vector char)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpeq(vector unsigned char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vcmpequb((vector char)__a,
-                                                      (vector char)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpeq(vector bool char __a, vector bool char __b) {
-  return (vector bool char)__builtin_altivec_vcmpequb((vector char)__a,
-                                                      (vector char)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_cmpeq(vector short __a,
-                                                           vector short __b) {
-  return (vector bool short)__builtin_altivec_vcmpequh(__a, __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpeq(vector unsigned short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vcmpequh((vector short)__a,
-                                                       (vector short)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpeq(vector bool short __a, vector bool short __b) {
-  return (vector bool short)__builtin_altivec_vcmpequh((vector short)__a,
-                                                       (vector short)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmpeq(vector int __a,
-                                                         vector int __b) {
-  return (vector bool int)__builtin_altivec_vcmpequw(__a, __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpeq(vector unsigned int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vcmpequw((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmpeq(vector bool int __a,
-                                                         vector bool int __b) {
-  return (vector bool int)__builtin_altivec_vcmpequw((vector int)__a,
-                                                     (vector int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpeq(vector signed long long __a, vector signed long long __b) {
-  return (vector bool long long)__builtin_altivec_vcmpequd(__a, __b);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpeq(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector bool long long)__builtin_altivec_vcmpequd(
-      (vector long long)__a, (vector long long)__b);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpeq(vector bool long long __a, vector bool long long __b) {
-  return (vector bool long long)__builtin_altivec_vcmpequd(
-      (vector long long)__a, (vector long long)__b);
-}
-
-#endif
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmpeq(vector float __a,
-                                                         vector float __b) {
-#ifdef __VSX__
-  return (vector bool int)__builtin_vsx_xvcmpeqsp(__a, __b);
-#else
-  return (vector bool int)__builtin_altivec_vcmpeqfp(__a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpeq(vector double __a, vector double __b) {
-  return (vector bool long long)__builtin_vsx_xvcmpeqdp(__a, __b);
-}
-#endif
-
-#ifdef __POWER9_VECTOR__
-/* vec_cmpne */
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpne(vector bool char __a, vector bool char __b) {
-  return (vector bool char)__builtin_altivec_vcmpneb((vector char)__a,
-                                                     (vector char)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpne(vector signed char __a, vector signed char __b) {
-  return (vector bool char)__builtin_altivec_vcmpneb((vector char)__a,
-                                                     (vector char)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpne(vector unsigned char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vcmpneb((vector char)__a,
-                                                     (vector char)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpne(vector bool short __a, vector bool short __b) {
-  return (vector bool short)__builtin_altivec_vcmpneh((vector short)__a,
-                                                      (vector short)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpne(vector signed short __a, vector signed short __b) {
-  return (vector bool short)__builtin_altivec_vcmpneh((vector short)__a,
-                                                      (vector short)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpne(vector unsigned short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vcmpneh((vector short)__a,
-                                                      (vector short)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpne(vector bool int __a, vector bool int __b) {
-  return (vector bool int)__builtin_altivec_vcmpnew((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpne(vector signed int __a, vector signed int __b) {
-  return (vector bool int)__builtin_altivec_vcmpnew((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpne(vector unsigned int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vcmpnew((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpne(vector bool long long __a, vector bool long long __b) {
-  return (vector bool long long)
-    ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpne(vector signed long long __a, vector signed long long __b) {
-  return (vector bool long long)
-    ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpne(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector bool long long)
-    ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpne(vector float __a, vector float __b) {
-  return (vector bool int)__builtin_altivec_vcmpnew((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpne(vector double __a, vector double __b) {
-  return (vector bool long long)
-    ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
-}
-
-/* vec_cmpnez */
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpnez(vector signed char __a, vector signed char __b) {
-  return (vector bool char)__builtin_altivec_vcmpnezb((vector char)__a,
-                                                      (vector char)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpnez(vector unsigned char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vcmpnezb((vector char)__a,
-                                                      (vector char)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpnez(vector signed short __a, vector signed short __b) {
-  return (vector bool short)__builtin_altivec_vcmpnezh((vector short)__a,
-                                                       (vector short)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpnez(vector unsigned short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vcmpnezh((vector short)__a,
-                                                       (vector short)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpnez(vector signed int __a, vector signed int __b) {
-  return (vector bool int)__builtin_altivec_vcmpnezw((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpnez(vector unsigned int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vcmpnezw((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ signed int __ATTRS_o_ai
-vec_cntlz_lsbb(vector signed char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vctzlsbb(__a);
-#else
-  return __builtin_altivec_vclzlsbb(__a);
-#endif
-}
-
-static __inline__ signed int __ATTRS_o_ai
-vec_cntlz_lsbb(vector unsigned char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vctzlsbb(__a);
-#else
-  return __builtin_altivec_vclzlsbb(__a);
-#endif
-}
-
-static __inline__ signed int __ATTRS_o_ai
-vec_cnttz_lsbb(vector signed char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vclzlsbb(__a);
-#else
-  return __builtin_altivec_vctzlsbb(__a);
-#endif
-}
-
-static __inline__ signed int __ATTRS_o_ai
-vec_cnttz_lsbb(vector unsigned char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vclzlsbb(__a);
-#else
-  return __builtin_altivec_vctzlsbb(__a);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_parity_lsbb(vector unsigned int __a) {
-  return __builtin_altivec_vprtybw(__a);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_parity_lsbb(vector signed int __a) {
-  return __builtin_altivec_vprtybw(__a);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_parity_lsbb(vector unsigned __int128 __a) {
-  return __builtin_altivec_vprtybq(__a);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_parity_lsbb(vector signed __int128 __a) {
-  return __builtin_altivec_vprtybq(__a);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_parity_lsbb(vector unsigned long long __a) {
-  return __builtin_altivec_vprtybd(__a);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_parity_lsbb(vector signed long long __a) {
-  return __builtin_altivec_vprtybd(__a);
-}
-
-#endif
-
-/* vec_cmpgt */
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpgt(vector signed char __a, vector signed char __b) {
-  return (vector bool char)__builtin_altivec_vcmpgtsb(__a, __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpgt(vector unsigned char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vcmpgtub(__a, __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_cmpgt(vector short __a,
-                                                           vector short __b) {
-  return (vector bool short)__builtin_altivec_vcmpgtsh(__a, __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpgt(vector unsigned short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vcmpgtuh(__a, __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmpgt(vector int __a,
-                                                         vector int __b) {
-  return (vector bool int)__builtin_altivec_vcmpgtsw(__a, __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpgt(vector unsigned int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vcmpgtuw(__a, __b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpgt(vector signed long long __a, vector signed long long __b) {
-  return (vector bool long long)__builtin_altivec_vcmpgtsd(__a, __b);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpgt(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector bool long long)__builtin_altivec_vcmpgtud(__a, __b);
-}
-#endif
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmpgt(vector float __a,
-                                                         vector float __b) {
-#ifdef __VSX__
-  return (vector bool int)__builtin_vsx_xvcmpgtsp(__a, __b);
-#else
-  return (vector bool int)__builtin_altivec_vcmpgtfp(__a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpgt(vector double __a, vector double __b) {
-  return (vector bool long long)__builtin_vsx_xvcmpgtdp(__a, __b);
-}
-#endif
-
-/* vec_cmpge */
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpge(vector signed char __a, vector signed char __b) {
-  return ~(vec_cmpgt(__b, __a));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpge(vector unsigned char __a, vector unsigned char __b) {
-  return ~(vec_cmpgt(__b, __a));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpge(vector signed short __a, vector signed short __b) {
-  return ~(vec_cmpgt(__b, __a));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpge(vector unsigned short __a, vector unsigned short __b) {
-  return ~(vec_cmpgt(__b, __a));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpge(vector signed int __a, vector signed int __b) {
-  return ~(vec_cmpgt(__b, __a));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpge(vector unsigned int __a, vector unsigned int __b) {
-  return ~(vec_cmpgt(__b, __a));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmpge(vector float __a,
-                                                         vector float __b) {
-#ifdef __VSX__
-  return (vector bool int)__builtin_vsx_xvcmpgesp(__a, __b);
-#else
-  return (vector bool int)__builtin_altivec_vcmpgefp(__a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpge(vector double __a, vector double __b) {
-  return (vector bool long long)__builtin_vsx_xvcmpgedp(__a, __b);
-}
-#endif
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpge(vector signed long long __a, vector signed long long __b) {
-  return ~(vec_cmpgt(__b, __a));
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpge(vector unsigned long long __a, vector unsigned long long __b) {
-  return ~(vec_cmpgt(__b, __a));
-}
-#endif
-
-/* vec_vcmpgefp */
-
-static __inline__ vector bool int __attribute__((__always_inline__))
-vec_vcmpgefp(vector float __a, vector float __b) {
-  return (vector bool int)__builtin_altivec_vcmpgefp(__a, __b);
-}
-
-/* vec_vcmpgtsb */
-
-static __inline__ vector bool char __attribute__((__always_inline__))
-vec_vcmpgtsb(vector signed char __a, vector signed char __b) {
-  return (vector bool char)__builtin_altivec_vcmpgtsb(__a, __b);
-}
-
-/* vec_vcmpgtub */
-
-static __inline__ vector bool char __attribute__((__always_inline__))
-vec_vcmpgtub(vector unsigned char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vcmpgtub(__a, __b);
-}
-
-/* vec_vcmpgtsh */
-
-static __inline__ vector bool short __attribute__((__always_inline__))
-vec_vcmpgtsh(vector short __a, vector short __b) {
-  return (vector bool short)__builtin_altivec_vcmpgtsh(__a, __b);
-}
-
-/* vec_vcmpgtuh */
-
-static __inline__ vector bool short __attribute__((__always_inline__))
-vec_vcmpgtuh(vector unsigned short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vcmpgtuh(__a, __b);
-}
-
-/* vec_vcmpgtsw */
-
-static __inline__ vector bool int __attribute__((__always_inline__))
-vec_vcmpgtsw(vector int __a, vector int __b) {
-  return (vector bool int)__builtin_altivec_vcmpgtsw(__a, __b);
-}
-
-/* vec_vcmpgtuw */
-
-static __inline__ vector bool int __attribute__((__always_inline__))
-vec_vcmpgtuw(vector unsigned int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vcmpgtuw(__a, __b);
-}
-
-/* vec_vcmpgtfp */
-
-static __inline__ vector bool int __attribute__((__always_inline__))
-vec_vcmpgtfp(vector float __a, vector float __b) {
-  return (vector bool int)__builtin_altivec_vcmpgtfp(__a, __b);
-}
-
-/* vec_cmple */
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmple(vector signed char __a, vector signed char __b) {
-  return vec_cmpge(__b, __a);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmple(vector unsigned char __a, vector unsigned char __b) {
-  return vec_cmpge(__b, __a);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmple(vector signed short __a, vector signed short __b) {
-  return vec_cmpge(__b, __a);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmple(vector unsigned short __a, vector unsigned short __b) {
-  return vec_cmpge(__b, __a);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmple(vector signed int __a, vector signed int __b) {
-  return vec_cmpge(__b, __a);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmple(vector unsigned int __a, vector unsigned int __b) {
-  return vec_cmpge(__b, __a);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmple(vector float __a,
-                                                         vector float __b) {
-  return vec_cmpge(__b, __a);
-}
-
-#ifdef __VSX__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmple(vector double __a, vector double __b) {
-  return vec_cmpge(__b, __a);
-}
-#endif
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmple(vector signed long long __a, vector signed long long __b) {
-  return vec_cmpge(__b, __a);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmple(vector unsigned long long __a, vector unsigned long long __b) {
-  return vec_cmpge(__b, __a);
-}
-#endif
-
-/* vec_cmplt */
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmplt(vector signed char __a, vector signed char __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmplt(vector unsigned char __a, vector unsigned char __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_cmplt(vector short __a,
-                                                           vector short __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmplt(vector unsigned short __a, vector unsigned short __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmplt(vector int __a,
-                                                         vector int __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmplt(vector unsigned int __a, vector unsigned int __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmplt(vector float __a,
-                                                         vector float __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-#ifdef __VSX__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmplt(vector double __a, vector double __b) {
-  return vec_cmpgt(__b, __a);
-}
-#endif
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmplt(vector signed long long __a, vector signed long long __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmplt(vector unsigned long long __a, vector unsigned long long __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-/* vec_popcnt */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_popcnt(vector signed char __a) {
-  return __builtin_altivec_vpopcntb(__a);
-}
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_popcnt(vector unsigned char __a) {
-  return __builtin_altivec_vpopcntb(__a);
-}
-static __inline__ vector signed short __ATTRS_o_ai
-vec_popcnt(vector signed short __a) {
-  return __builtin_altivec_vpopcnth(__a);
-}
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_popcnt(vector unsigned short __a) {
-  return __builtin_altivec_vpopcnth(__a);
-}
-static __inline__ vector signed int __ATTRS_o_ai
-vec_popcnt(vector signed int __a) {
-  return __builtin_altivec_vpopcntw(__a);
-}
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_popcnt(vector unsigned int __a) {
-  return __builtin_altivec_vpopcntw(__a);
-}
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_popcnt(vector signed long long __a) {
-  return __builtin_altivec_vpopcntd(__a);
-}
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_popcnt(vector unsigned long long __a) {
-  return __builtin_altivec_vpopcntd(__a);
-}
-
-/* vec_cntlz */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_cntlz(vector signed char __a) {
-  return __builtin_altivec_vclzb(__a);
-}
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_cntlz(vector unsigned char __a) {
-  return __builtin_altivec_vclzb(__a);
-}
-static __inline__ vector signed short __ATTRS_o_ai
-vec_cntlz(vector signed short __a) {
-  return __builtin_altivec_vclzh(__a);
-}
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_cntlz(vector unsigned short __a) {
-  return __builtin_altivec_vclzh(__a);
-}
-static __inline__ vector signed int __ATTRS_o_ai
-vec_cntlz(vector signed int __a) {
-  return __builtin_altivec_vclzw(__a);
-}
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_cntlz(vector unsigned int __a) {
-  return __builtin_altivec_vclzw(__a);
-}
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_cntlz(vector signed long long __a) {
-  return __builtin_altivec_vclzd(__a);
-}
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_cntlz(vector unsigned long long __a) {
-  return __builtin_altivec_vclzd(__a);
-}
-#endif
-
-#ifdef __POWER9_VECTOR__
-
-/* vec_cnttz */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_cnttz(vector signed char __a) {
-  return __builtin_altivec_vctzb(__a);
-}
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_cnttz(vector unsigned char __a) {
-  return __builtin_altivec_vctzb(__a);
-}
-static __inline__ vector signed short __ATTRS_o_ai
-vec_cnttz(vector signed short __a) {
-  return __builtin_altivec_vctzh(__a);
-}
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_cnttz(vector unsigned short __a) {
-  return __builtin_altivec_vctzh(__a);
-}
-static __inline__ vector signed int __ATTRS_o_ai
-vec_cnttz(vector signed int __a) {
-  return __builtin_altivec_vctzw(__a);
-}
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_cnttz(vector unsigned int __a) {
-  return __builtin_altivec_vctzw(__a);
-}
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_cnttz(vector signed long long __a) {
-  return __builtin_altivec_vctzd(__a);
-}
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_cnttz(vector unsigned long long __a) {
-  return __builtin_altivec_vctzd(__a);
-}
-
-/* vec_first_match_index */
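-
-/* These helpers return the element index of the first position at which the
-   two vectors compare equal.  vec_cmpeq yields an all-ones lane per matching
-   element; viewed as two 64-bit doublewords, cnttz (little-endian) or cntlz
-   (big-endian) counts the bits in front of the first set bit.  When the first
-   doubleword is all zero the count is 64, so the search continues in the
-   second doubleword and 64 is added.  Shifting the bit count right by 3, 4 or
-   5 turns it into a byte, halfword or word index; if nothing matches, the
-   element count itself (e.g. 16 for bytes) is returned. */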
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_index(vector signed char __a, vector signed char __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 3;
-  }
-  return __res[0] >> 3;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_index(vector unsigned char __a, vector unsigned char __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 3;
-  }
-  return __res[0] >> 3;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_index(vector signed short __a, vector signed short __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 4;
-  }
-  return __res[0] >> 4;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_index(vector unsigned short __a, vector unsigned short __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 4;
-  }
-  return __res[0] >> 4;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_index(vector signed int __a, vector signed int __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 5;
-  }
-  return __res[0] >> 5;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_index(vector unsigned int __a, vector unsigned int __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 5;
-  }
-  return __res[0] >> 5;
-}
-
-/* vec_first_match_or_eos_index */
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_or_eos_index(vector signed char __a, vector signed char __b) {
-  /* OR the equality comparison of the two vectors with comparisons of that
-     result against each input.  A lane ends up all ones either when the
-     elements are equal, or when an element is zero (end of string): a zero
-     element compares equal to the all-zero lane produced for a mismatch.
-  */
-  vector bool char __tmp1 = vec_cmpeq(__a, __b);
-  vector bool char __tmp2 = __tmp1 |
-                            vec_cmpeq((vector signed char)__tmp1, __a) |
-                            vec_cmpeq((vector signed char)__tmp1, __b);
-
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-      vec_cnttz((vector unsigned long long)__tmp2);
-#else
-      vec_cntlz((vector unsigned long long)__tmp2);
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 3;
-  }
-  return __res[0] >> 3;
-}
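-
-/* Worked example of the check above, with byte elements: for
-   __a = {'a','b',0,...} and __b = {'x','y','z',...} no lane of __tmp1 is set,
-   but __tmp1[2] == 0 == __a[2], so vec_cmpeq(__tmp1, __a) sets lane 2 and the
-   reported "match or end of string" index is 2, the terminator of __a. */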
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_or_eos_index(vector unsigned char __a,
-                             vector unsigned char __b) {
-  vector bool char __tmp1 = vec_cmpeq(__a, __b);
-  vector bool char __tmp2 = __tmp1 |
-                            vec_cmpeq((vector unsigned char)__tmp1, __a) |
-                            vec_cmpeq((vector unsigned char)__tmp1, __b);
-
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-      vec_cnttz((vector unsigned long long)__tmp2);
-#else
-      vec_cntlz((vector unsigned long long)__tmp2);
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 3;
-  }
-  return __res[0] >> 3;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_or_eos_index(vector signed short __a, vector signed short __b) {
-  vector bool short __tmp1 = vec_cmpeq(__a, __b);
-  vector bool short __tmp2 = __tmp1 |
-                             vec_cmpeq((vector signed short)__tmp1, __a) |
-                             vec_cmpeq((vector signed short)__tmp1, __b);
-
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-      vec_cnttz((vector unsigned long long)__tmp2);
-#else
-      vec_cntlz((vector unsigned long long)__tmp2);
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 4;
-  }
-  return __res[0] >> 4;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_or_eos_index(vector unsigned short __a,
-                             vector unsigned short __b) {
-  vector bool short __tmp1 = vec_cmpeq(__a, __b);
-  vector bool short __tmp2 = __tmp1 |
-                             vec_cmpeq((vector unsigned short)__tmp1, __a) |
-                             vec_cmpeq((vector unsigned short)__tmp1, __b);
-
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-      vec_cnttz((vector unsigned long long)__tmp2);
-#else
-      vec_cntlz((vector unsigned long long)__tmp2);
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 4;
-  }
-  return __res[0] >> 4;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_or_eos_index(vector signed int __a, vector signed int __b) {
-  vector bool int __tmp1 = vec_cmpeq(__a, __b);
-  vector bool int __tmp2 = __tmp1 | vec_cmpeq((vector signed int)__tmp1, __a) |
-                           vec_cmpeq((vector signed int)__tmp1, __b);
-
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-      vec_cnttz((vector unsigned long long)__tmp2);
-#else
-      vec_cntlz((vector unsigned long long)__tmp2);
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 5;
-  }
-  return __res[0] >> 5;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_or_eos_index(vector unsigned int __a, vector unsigned int __b) {
-  vector bool int __tmp1 = vec_cmpeq(__a, __b);
-  vector bool int __tmp2 = __tmp1 |
-                           vec_cmpeq((vector unsigned int)__tmp1, __a) |
-                           vec_cmpeq((vector unsigned int)__tmp1, __b);
-
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)__tmp2);
-#else
-    vec_cntlz((vector unsigned long long)__tmp2);
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 5;
-  }
-  return __res[0] >> 5;
-}
-
-/* vec_first_mismatch_index */
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_index(vector signed char __a, vector signed char __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 3;
-  }
-  return __res[0] >> 3;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_index(vector unsigned char __a, vector unsigned char __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 3;
-  }
-  return __res[0] >> 3;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_index(vector signed short __a, vector signed short __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 4;
-  }
-  return __res[0] >> 4;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_index(vector unsigned short __a, vector unsigned short __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 4;
-  }
-  return __res[0] >> 4;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_index(vector signed int __a, vector signed int __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 5;
-  }
-  return __res[0] >> 5;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_index(vector unsigned int __a, vector unsigned int __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 5;
-  }
-  return __res[0] >> 5;
-}
-
-/* vec_first_mismatch_or_eos_index */
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_or_eos_index(vector signed char __a,
-                                vector signed char __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 3;
-  }
-  return __res[0] >> 3;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_or_eos_index(vector unsigned char __a,
-                                vector unsigned char __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 3;
-  }
-  return __res[0] >> 3;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_or_eos_index(vector signed short __a,
-                                vector signed short __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 4;
-  }
-  return __res[0] >> 4;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_or_eos_index(vector unsigned short __a,
-                                vector unsigned short __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 4;
-  }
-  return __res[0] >> 4;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_or_eos_index(vector signed int __a, vector signed int __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 5;
-  }
-  return __res[0] >> 5;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_or_eos_index(vector unsigned int __a,
-                                vector unsigned int __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 5;
-  }
-  return __res[0] >> 5;
-}
-
-static __inline__ vector double  __ATTRS_o_ai
-vec_insert_exp(vector double __a, vector unsigned long long __b) {
-  return __builtin_vsx_xviexpdp((vector unsigned long long)__a,__b);
-}
-
-static __inline__ vector double  __ATTRS_o_ai
-vec_insert_exp(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_vsx_xviexpdp(__a,__b);
-}
-
-static __inline__ vector float  __ATTRS_o_ai
-vec_insert_exp(vector float __a, vector unsigned int __b) {
-  return __builtin_vsx_xviexpsp((vector unsigned int)__a,__b);
-}
-
-static __inline__ vector float  __ATTRS_o_ai
-vec_insert_exp(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_vsx_xviexpsp(__a,__b);
-}
-
-#if defined(__powerpc64__)
-static __inline__ vector signed char __ATTRS_o_ai vec_xl_len(signed char *__a,
-                                                             size_t __b) {
-  return (vector signed char)__builtin_vsx_lxvl(__a, (__b << 56));
-}
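-
-/* The ISA 3.0 lxvl/stxvl instructions take the byte count in the most
-   significant byte of their length operand, hence the (__b << 56) shift in
-   each of these length-controlled load/store variants. */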
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xl_len(unsigned char *__a, size_t __b) {
-  return (vector unsigned char)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector signed short __ATTRS_o_ai vec_xl_len(signed short *__a,
-                                                              size_t __b) {
-  return (vector signed short)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_xl_len(unsigned short *__a, size_t __b) {
-  return (vector unsigned short)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector signed int __ATTRS_o_ai vec_xl_len(signed int *__a,
-                                                            size_t __b) {
-  return (vector signed int)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_xl_len(unsigned int *__a,
-                                                              size_t __b) {
-  return (vector unsigned int)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_xl_len(float *__a, size_t __b) {
-  return (vector float)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_xl_len(signed __int128 *__a, size_t __b) {
-  return (vector signed __int128)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_xl_len(unsigned __int128 *__a, size_t __b) {
-  return (vector unsigned __int128)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_xl_len(signed long long *__a, size_t __b) {
-  return (vector signed long long)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_xl_len(unsigned long long *__a, size_t __b) {
-  return (vector unsigned long long)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_xl_len(double *__a,
-                                                        size_t __b) {
-  return (vector double)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xl_len_r(unsigned char *__a, size_t __b) {
-  vector unsigned char __res =
-      (vector unsigned char)__builtin_vsx_lxvll(__a, (__b << 56));
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __mask =
-      (vector unsigned char)__builtin_altivec_lvsr(16 - __b, (int *)NULL);
-  __res = (vector unsigned char)__builtin_altivec_vperm_4si(
-      (vector int)__res, (vector int)__res, __mask);
-#endif
-  return __res;
-}
-
-// vec_xst_len
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned char __a,
-                                                unsigned char *__b,
-                                                size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed char __a,
-                                                signed char *__b, size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed short __a,
-                                                signed short *__b, size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned short __a,
-                                                unsigned short *__b,
-                                                size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed int __a,
-                                                signed int *__b, size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned int __a,
-                                                unsigned int *__b, size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector float __a, float *__b,
-                                                size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed __int128 __a,
-                                                signed __int128 *__b,
-                                                size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned __int128 __a,
-                                                unsigned __int128 *__b,
-                                                size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed long long __a,
-                                                signed long long *__b,
-                                                size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned long long __a,
-                                                unsigned long long *__b,
-                                                size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector double __a, double *__b,
-                                                size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len_r(vector unsigned char __a,
-                                                  unsigned char *__b,
-                                                  size_t __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __mask =
-      (vector unsigned char)__builtin_altivec_lvsl(16 - __c, (int *)NULL);
-  vector unsigned char __res =
-      __builtin_altivec_vperm_4si((vector int)__a, (vector int)__a, __mask);
-  return __builtin_vsx_stxvll((vector int)__res, __b, (__c << 56));
-#else
-  return __builtin_vsx_stxvll((vector int)__a, __b, (__c << 56));
-#endif
-}
-#endif
-#endif
-
-/* vec_cpsgn */
-
-#ifdef __VSX__
-static __inline__ vector float __ATTRS_o_ai vec_cpsgn(vector float __a,
-                                                      vector float __b) {
-  return __builtin_vsx_xvcpsgnsp(__a, __b);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
-                                                       vector double __b) {
-  return __builtin_vsx_xvcpsgndp(__a, __b);
-}
-#endif
-
-/* vec_ctf */
-
-#ifdef __VSX__
-#define vec_ctf(__a, __b)                                                      \
-  _Generic((__a), vector int                                                   \
-           : (vector float)__builtin_altivec_vcfsx((__a), (__b)),              \
-             vector unsigned int                                               \
-           : (vector float)__builtin_altivec_vcfux((vector int)(__a), (__b)),  \
-             vector unsigned long long                                         \
-           : (__builtin_convertvector((vector unsigned long long)(__a),        \
-                                      vector double) *                         \
-              (vector double)(vector unsigned long long)((0x3ffULL - (__b))    \
-                                                         << 52)),              \
-             vector signed long long                                           \
-           : (__builtin_convertvector((vector signed long long)(__a),          \
-                                      vector double) *                         \
-              (vector double)(vector unsigned long long)((0x3ffULL - (__b))    \
-                                                         << 52)))
-#else
-#define vec_ctf(__a, __b)                                                      \
-  _Generic((__a), vector int                                                   \
-           : (vector float)__builtin_altivec_vcfsx((__a), (__b)),              \
-             vector unsigned int                                               \
-           : (vector float)__builtin_altivec_vcfux((vector int)(__a), (__b)))
-#endif
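-
-/* In the 64-bit vec_ctf cases above, the scale factor 2**-__b is built
-   directly from the IEEE 754 double layout: the biased exponent (0x3ff - __b)
-   is placed in bits 62:52 of each doubleword and the converted values are
-   multiplied by the resulting vector double.  vec_cts and vec_ctu below use
-   (0x3ff + __b) in the same way to scale by 2**__b before truncating. */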
-
-/* vec_vcfux */
-
-#define vec_vcfux __builtin_altivec_vcfux
-
-/* vec_vcfsx */
-
-#define vec_vcfsx(__a, __b) __builtin_altivec_vcfsx((vector int)(__a), (__b))
-
-/* vec_cts */
-
-#ifdef __VSX__
-#define vec_cts(__a, __b)                                                      \
-  _Generic((__a), vector float                                                 \
-           : __builtin_altivec_vctsxs((__a), (__b)), vector double             \
-           : __extension__({                                                   \
-             vector double __ret =                                             \
-                 (__a) *                                                       \
-                 (vector double)(vector unsigned long long)((0x3ffULL + (__b)) \
-                                                            << 52);            \
-             __builtin_convertvector(__ret, vector signed long long);          \
-           }))
-#else
-#define vec_cts __builtin_altivec_vctsxs
-#endif
-
-/* vec_vctsxs */
-
-#define vec_vctsxs __builtin_altivec_vctsxs
-
-/* vec_ctu */
-
-#ifdef __VSX__
-#define vec_ctu(__a, __b)                                                      \
-  _Generic((__a), vector float                                                 \
-           : __builtin_altivec_vctuxs((__a), (__b)), vector double             \
-           : __extension__({                                                   \
-             vector double __ret =                                             \
-                 (__a) *                                                       \
-                 (vector double)(vector unsigned long long)((0x3ffULL + __b)   \
-                                                            << 52);            \
-             __builtin_convertvector(__ret, vector unsigned long long);        \
-           }))
-#else
-#define vec_ctu __builtin_altivec_vctuxs
-#endif
-
-/* vec_vctuxs */
-
-#define vec_vctuxs __builtin_altivec_vctuxs
-
-/* vec_signed */
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_sld(vector signed int, vector signed int, unsigned const int __c);
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_signed(vector float __a) {
-  return __builtin_convertvector(__a, vector signed int);
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_signed(vector double __a) {
-  return __builtin_convertvector(__a, vector signed long long);
-}
-
-static __inline__ vector signed int __attribute__((__always_inline__))
-vec_signed2(vector double __a, vector double __b) {
-  return (vector signed int) { __a[0], __a[1], __b[0], __b[1] };
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_signede(vector double __a) {
-#ifdef __LITTLE_ENDIAN__
-  vector signed int __ret = __builtin_vsx_xvcvdpsxws(__a);
-  return vec_sld(__ret, __ret, 12);
-#else
-  return __builtin_vsx_xvcvdpsxws(__a);
-#endif
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_signedo(vector double __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvdpsxws(__a);
-#else
-  vector signed int __ret = __builtin_vsx_xvcvdpsxws(__a);
-  return vec_sld(__ret, __ret, 12);
-#endif
-}
-#endif
-
-/* vec_unsigned */
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sld(vector unsigned int, vector unsigned int, unsigned const int __c);
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_unsigned(vector float __a) {
-  return __builtin_convertvector(__a, vector unsigned int);
-}
-
-#ifdef __VSX__
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_unsigned(vector double __a) {
-  return __builtin_convertvector(__a, vector unsigned long long);
-}
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_unsigned2(vector double __a, vector double __b) {
-  return (vector unsigned int) { __a[0], __a[1], __b[0], __b[1] };
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_unsignede(vector double __a) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned int __ret = __builtin_vsx_xvcvdpuxws(__a);
-  return vec_sld(__ret, __ret, 12);
-#else
-  return __builtin_vsx_xvcvdpuxws(__a);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_unsignedo(vector double __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvdpuxws(__a);
-#else
-  vector unsigned int __ret = __builtin_vsx_xvcvdpuxws(__a);
-  return vec_sld(__ret, __ret, 12);
-#endif
-}
-#endif
-
-/* vec_float */
-
-static __inline__ vector float __ATTRS_o_ai
-vec_sld(vector float, vector float, unsigned const int __c);
-
-static __inline__ vector float __ATTRS_o_ai
-vec_float(vector signed int __a) {
-  return __builtin_convertvector(__a, vector float);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_float(vector unsigned int __a) {
-  return __builtin_convertvector(__a, vector float);
-}
-
-#ifdef __VSX__
-static __inline__ vector float __ATTRS_o_ai
-vec_float2(vector signed long long __a, vector signed long long __b) {
-  return (vector float) { __a[0], __a[1], __b[0], __b[1] };
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_float2(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector float) { __a[0], __a[1], __b[0], __b[1] };
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_float2(vector double __a, vector double __b) {
-  return (vector float) { __a[0], __a[1], __b[0], __b[1] };
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_floate(vector signed long long __a) {
-#ifdef __LITTLE_ENDIAN__
-  vector float __ret = __builtin_vsx_xvcvsxdsp(__a);
-  return vec_sld(__ret, __ret, 12);
-#else
-  return __builtin_vsx_xvcvsxdsp(__a);
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_floate(vector unsigned long long __a) {
-#ifdef __LITTLE_ENDIAN__
-  vector float __ret = __builtin_vsx_xvcvuxdsp(__a);
-  return vec_sld(__ret, __ret, 12);
-#else
-  return __builtin_vsx_xvcvuxdsp(__a);
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_floate(vector double __a) {
-#ifdef __LITTLE_ENDIAN__
-  vector float __ret = __builtin_vsx_xvcvdpsp(__a);
-  return vec_sld(__ret, __ret, 12);
-#else
-  return __builtin_vsx_xvcvdpsp(__a);
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_floato(vector signed long long __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvsxdsp(__a);
-#else
-  vector float __ret = __builtin_vsx_xvcvsxdsp(__a);
-  return vec_sld(__ret, __ret, 12);
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_floato(vector unsigned long long __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvuxdsp(__a);
-#else
-  vector float __ret = __builtin_vsx_xvcvuxdsp(__a);
-  return vec_sld(__ret, __ret, 12);
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_floato(vector double __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvdpsp(__a);
-#else
-  vector float __ret = __builtin_vsx_xvcvdpsp(__a);
-  return vec_sld(__ret, __ret, 12);
-#endif
-}
-#endif
-
-/* vec_double */
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai
-vec_double(vector signed long long __a) {
-  return __builtin_convertvector(__a, vector double);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_double(vector unsigned long long __a) {
-  return __builtin_convertvector(__a, vector double);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doublee(vector signed int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvsxwdp(vec_sld(__a, __a, 4));
-#else
-  return __builtin_vsx_xvcvsxwdp(__a);
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doublee(vector unsigned int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvuxwdp(vec_sld(__a, __a, 4));
-#else
-  return __builtin_vsx_xvcvuxwdp(__a);
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doublee(vector float __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvspdp(vec_sld(__a, __a, 4));
-#else
-  return __builtin_vsx_xvcvspdp(__a);
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doubleh(vector signed int __a) {
-  vector double __ret = {__a[0], __a[1]};
-  return __ret;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doubleh(vector unsigned int __a) {
-  vector double __ret = {__a[0], __a[1]};
-  return __ret;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doubleh(vector float __a) {
-  vector double __ret = {__a[0], __a[1]};
-  return __ret;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doublel(vector signed int __a) {
-  vector double __ret = {__a[2], __a[3]};
-  return __ret;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doublel(vector unsigned int __a) {
-  vector double __ret = {__a[2], __a[3]};
-  return __ret;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doublel(vector float __a) {
-  vector double __ret = {__a[2], __a[3]};
-  return __ret;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doubleo(vector signed int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvsxwdp(__a);
-#else
-  return __builtin_vsx_xvcvsxwdp(vec_sld(__a, __a, 4));
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doubleo(vector unsigned int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvuxwdp(__a);
-#else
-  return __builtin_vsx_xvcvuxwdp(vec_sld(__a, __a, 4));
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doubleo(vector float __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvspdp(__a);
-#else
-  return __builtin_vsx_xvcvspdp(vec_sld(__a, __a, 4));
-#endif
-}
-#endif
-
-/* vec_div */
-
-/* Integer vector divides: the vectors are scalarized, the elements are
-   divided individually, and the results are reassembled into a vector.
-*/
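-/* For example, vec_div((vector signed int){9, 20, -7, 6},
-                        (vector signed int){3, 5, 2, 6}) yields {3, 4, -3, 1},
-   each quotient truncated toward zero as in scalar C division. */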
-static __inline__ vector signed char __ATTRS_o_ai
-vec_div(vector signed char __a, vector signed char __b) {
-  return __a / __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_div(vector unsigned char __a, vector unsigned char __b) {
-  return __a / __b;
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_div(vector signed short __a, vector signed short __b) {
-  return __a / __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_div(vector unsigned short __a, vector unsigned short __b) {
-  return __a / __b;
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_div(vector signed int __a, vector signed int __b) {
-  return __a / __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_div(vector unsigned int __a, vector unsigned int __b) {
-  return __a / __b;
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_div(vector signed long long __a, vector signed long long __b) {
-  return __a / __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_div(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a / __b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_div(vector float __a,
-                                                    vector float __b) {
-  return __a / __b;
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_div(vector double __a,
-                                                     vector double __b) {
-  return __a / __b;
-}
-#endif
-
-/* vec_dss */
-
-static __inline__ void __attribute__((__always_inline__)) vec_dss(int __a) {
-  __builtin_altivec_dss(__a);
-}
-
-/* vec_dssall */
-
-static __inline__ void __attribute__((__always_inline__)) vec_dssall(void) {
-  __builtin_altivec_dssall();
-}
-
-/* vec_dst */
-#define vec_dst(__PTR, __CW, __STR) \
-  __extension__(                    \
-      { __builtin_altivec_dst((const void *)(__PTR), (__CW), (__STR)); })
-
-/* vec_dstst */
-#define vec_dstst(__PTR, __CW, __STR) \
-  __extension__(                      \
-      { __builtin_altivec_dstst((const void *)(__PTR), (__CW), (__STR)); })
-
-/* vec_dststt */
-#define vec_dststt(__PTR, __CW, __STR) \
-  __extension__(                       \
-      { __builtin_altivec_dststt((const void *)(__PTR), (__CW), (__STR)); })
-
-/* vec_dstt */
-#define vec_dstt(__PTR, __CW, __STR) \
-  __extension__(                     \
-      { __builtin_altivec_dstt((const void *)(__PTR), (__CW), (__STR)); })
-
-/* vec_eqv */
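-
-/* Bitwise equivalence: every bit of the result is ~(__a ^ __b); each overload
-   below just reinterprets its operands for the VSX xxleqv builtin. */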
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed char __ATTRS_o_ai
-vec_eqv(vector signed char __a, vector signed char __b) {
-  return (vector signed char)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                                  (vector unsigned int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_eqv(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                                    (vector unsigned int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_eqv(vector bool char __a,
-                                                        vector bool char __b) {
-  return (vector bool char)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                                (vector unsigned int)__b);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_eqv(vector signed short __a, vector signed short __b) {
-  return (vector signed short)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                                   (vector unsigned int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_eqv(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                                     (vector unsigned int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_eqv(vector bool short __a, vector bool short __b) {
-  return (vector bool short)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                                 (vector unsigned int)__b);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_eqv(vector signed int __a, vector signed int __b) {
-  return (vector signed int)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                                 (vector unsigned int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_eqv(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_vsx_xxleqv(__a, __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_eqv(vector bool int __a,
-                                                       vector bool int __b) {
-  return (vector bool int)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                               (vector unsigned int)__b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_eqv(vector signed long long __a, vector signed long long __b) {
-  return (vector signed long long)__builtin_vsx_xxleqv(
-      (vector unsigned int)__a, (vector unsigned int)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_eqv(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__builtin_vsx_xxleqv(
-      (vector unsigned int)__a, (vector unsigned int)__b);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_eqv(vector bool long long __a, vector bool long long __b) {
-  return (vector bool long long)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                                     (vector unsigned int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_eqv(vector float __a,
-                                                    vector float __b) {
-  return (vector float)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                            (vector unsigned int)__b);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_eqv(vector double __a,
-                                                     vector double __b) {
-  return (vector double)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                             (vector unsigned int)__b);
-}
-#endif
-
-/* vec_expte */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_expte(vector float __a) {
-  return __builtin_altivec_vexptefp(__a);
-}
-
-/* vec_vexptefp */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vexptefp(vector float __a) {
-  return __builtin_altivec_vexptefp(__a);
-}
-
-/* vec_floor */
-
-static __inline__ vector float __ATTRS_o_ai vec_floor(vector float __a) {
-#ifdef __VSX__
-  return __builtin_vsx_xvrspim(__a);
-#else
-  return __builtin_altivec_vrfim(__a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_floor(vector double __a) {
-  return __builtin_vsx_xvrdpim(__a);
-}
-#endif
-
-/* vec_vrfim */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vrfim(vector float __a) {
-  return __builtin_altivec_vrfim(__a);
-}
-
-/* vec_ld */
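-
-/* vec_ld loads 16 bytes with the lvx instruction: the low four bits of the
-   effective address __a + __b are ignored, so the load always comes from a
-   16-byte-aligned address. */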
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_ld(int __a, const vector signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_ld(int __a, const signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_ld(int __a, const vector unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_ld(int __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_ld(int __a, const vector bool char *__b) {
-  return (vector bool char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_ld(int __a,
-                                                   const vector short *__b) {
-  return (vector short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_ld(int __a, const short *__b) {
-  return (vector short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_ld(int __a, const vector unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_ld(int __a, const unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_ld(int __a, const vector bool short *__b) {
-  return (vector bool short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_ld(int __a,
-                                                   const vector pixel *__b) {
-  return (vector pixel)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_ld(int __a,
-                                                 const vector int *__b) {
-  return (vector int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_ld(int __a, const int *__b) {
-  return (vector int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_ld(int __a, const vector unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_ld(int __a, const unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_ld(int __a, const vector bool int *__b) {
-  return (vector bool int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_ld(int __a,
-                                                   const vector float *__b) {
-  return (vector float)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_ld(int __a, const float *__b) {
-  return (vector float)__builtin_altivec_lvx(__a, __b);
-}
-
-/* vec_lvx */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvx(int __a, const vector signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvx(int __a, const signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvx(int __a, const vector unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvx(int __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_lvx(int __a, const vector bool char *__b) {
-  return (vector bool char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvx(int __a,
-                                                    const vector short *__b) {
-  return (vector short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvx(int __a, const short *__b) {
-  return (vector short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvx(int __a, const vector unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvx(int __a, const unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_lvx(int __a, const vector bool short *__b) {
-  return (vector bool short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_lvx(int __a,
-                                                    const vector pixel *__b) {
-  return (vector pixel)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvx(int __a,
-                                                  const vector int *__b) {
-  return (vector int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvx(int __a, const int *__b) {
-  return (vector int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvx(int __a, const vector unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvx(int __a, const unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_lvx(int __a, const vector bool int *__b) {
-  return (vector bool int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvx(int __a,
-                                                    const vector float *__b) {
-  return (vector float)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvx(int __a, const float *__b) {
-  return (vector float)__builtin_altivec_lvx(__a, __b);
-}
-
-/* vec_lde */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lde(int __a, const signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvebx(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lde(int __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvebx(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lde(int __a, const short *__b) {
-  return (vector short)__builtin_altivec_lvehx(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lde(int __a, const unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvehx(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lde(int __a, const int *__b) {
-  return (vector int)__builtin_altivec_lvewx(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lde(int __a, const unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvewx(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lde(int __a, const float *__b) {
-  return (vector float)__builtin_altivec_lvewx(__a, __b);
-}
-
-/* vec_lvebx */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvebx(int __a, const signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvebx(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvebx(int __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvebx(__a, __b);
-}
-
-/* vec_lvehx */
-
-static __inline__ vector short __ATTRS_o_ai vec_lvehx(int __a,
-                                                      const short *__b) {
-  return (vector short)__builtin_altivec_lvehx(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvehx(int __a, const unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvehx(__a, __b);
-}
-
-/* vec_lvewx */
-
-static __inline__ vector int __ATTRS_o_ai vec_lvewx(int __a, const int *__b) {
-  return (vector int)__builtin_altivec_lvewx(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvewx(int __a, const unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvewx(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvewx(int __a,
-                                                      const float *__b) {
-  return (vector float)__builtin_altivec_lvewx(__a, __b);
-}
-
-/* vec_ldl */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_ldl(int __a, const vector signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_ldl(int __a, const signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_ldl(int __a, const vector unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_ldl(int __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_ldl(int __a, const vector bool char *__b) {
-  return (vector bool char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_ldl(int __a,
-                                                    const vector short *__b) {
-  return (vector short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_ldl(int __a, const short *__b) {
-  return (vector short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_ldl(int __a, const vector unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_ldl(int __a, const unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_ldl(int __a, const vector bool short *__b) {
-  return (vector bool short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_ldl(int __a,
-                                                    const vector pixel *__b) {
-  return (vector pixel)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_ldl(int __a,
-                                                  const vector int *__b) {
-  return (vector int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_ldl(int __a, const int *__b) {
-  return (vector int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_ldl(int __a, const vector unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_ldl(int __a, const unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_ldl(int __a, const vector bool int *__b) {
-  return (vector bool int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_ldl(int __a,
-                                                    const vector float *__b) {
-  return (vector float)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_ldl(int __a, const float *__b) {
-  return (vector float)__builtin_altivec_lvxl(__a, __b);
-}
-
-/* vec_lvxl */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvxl(int __a, const vector signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvxl(int __a, const signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvxl(int __a, const vector unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvxl(int __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_lvxl(int __a, const vector bool char *__b) {
-  return (vector bool char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvxl(int __a,
-                                                     const vector short *__b) {
-  return (vector short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvxl(int __a,
-                                                     const short *__b) {
-  return (vector short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvxl(int __a, const vector unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvxl(int __a, const unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_lvxl(int __a, const vector bool short *__b) {
-  return (vector bool short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_lvxl(int __a,
-                                                     const vector pixel *__b) {
-  return (vector pixel)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvxl(int __a,
-                                                   const vector int *__b) {
-  return (vector int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvxl(int __a, const int *__b) {
-  return (vector int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvxl(int __a, const vector unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvxl(int __a, const unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_lvxl(int __a, const vector bool int *__b) {
-  return (vector bool int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvxl(int __a,
-                                                     const vector float *__b) {
-  return (vector float)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvxl(int __a,
-                                                     const float *__b) {
-  return (vector float)__builtin_altivec_lvxl(__a, __b);
-}
-
-/* vec_loge */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_loge(vector float __a) {
-  return __builtin_altivec_vlogefp(__a);
-}
-
-/* vec_vlogefp */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vlogefp(vector float __a) {
-  return __builtin_altivec_vlogefp(__a);
-}
-
-/* vec_lvsl */
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsl(int __a, const signed char *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvsl(int __a, const signed char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsl(int __a, const unsigned char *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvsl(int __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsl(int __a, const short *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsl(int __a,
-                                                             const short *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsl(int __a, const unsigned short *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvsl(int __a, const unsigned short *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsl(int __a, const int *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsl(int __a,
-                                                             const int *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsl(int __a, const unsigned int *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvsl(int __a, const unsigned int *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsl(int __a, const float *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsl(int __a,
-                                                             const float *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-}
-#endif
-
-/* vec_lvsr */
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsr(int __a, const signed char *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvsr(int __a, const signed char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsr(int __a, const unsigned char *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvsr(int __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsr(int __a, const short *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsr(int __a,
-                                                             const short *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsr(int __a, const unsigned short *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvsr(int __a, const unsigned short *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsr(int __a, const int *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsr(int __a,
-                                                             const int *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsr(int __a, const unsigned int *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvsr(int __a, const unsigned int *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsr(int __a, const float *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsr(int __a,
-                                                             const float *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-}
-#endif
-
-/* vec_madd */
-static __inline__ vector signed short __ATTRS_o_ai
-vec_mladd(vector signed short, vector signed short, vector signed short);
-static __inline__ vector signed short __ATTRS_o_ai
-vec_mladd(vector signed short, vector unsigned short, vector unsigned short);
-static __inline__ vector signed short __ATTRS_o_ai
-vec_mladd(vector unsigned short, vector signed short, vector signed short);
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_mladd(vector unsigned short, vector unsigned short, vector unsigned short);
-
-static __inline__ vector signed short __ATTRS_o_ai vec_madd(
-    vector signed short __a, vector signed short __b, vector signed short __c) {
-  return vec_mladd(__a, __b, __c);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_madd(vector signed short __a, vector unsigned short __b,
-         vector unsigned short __c) {
-  return vec_mladd(__a, __b, __c);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_madd(vector unsigned short __a, vector signed short __b,
-         vector signed short __c) {
-  return vec_mladd(__a, __b, __c);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_madd(vector unsigned short __a, vector unsigned short __b,
-         vector unsigned short __c) {
-  return vec_mladd(__a, __b, __c);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_madd(vector float __a,
-                                                     vector float __b,
-                                                     vector float __c) {
-#ifdef __VSX__
-  return __builtin_vsx_xvmaddasp(__a, __b, __c);
-#else
-  return __builtin_altivec_vmaddfp(__a, __b, __c);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_madd(vector double __a,
-                                                      vector double __b,
-                                                      vector double __c) {
-  return __builtin_vsx_xvmaddadp(__a, __b, __c);
-}
-#endif
-
-/* vec_vmaddfp */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vmaddfp(vector float __a, vector float __b, vector float __c) {
-  return __builtin_altivec_vmaddfp(__a, __b, __c);
-}
-
-/* vec_madds */
-
-static __inline__ vector signed short __attribute__((__always_inline__))
-vec_madds(vector signed short __a, vector signed short __b,
-          vector signed short __c) {
-  return __builtin_altivec_vmhaddshs(__a, __b, __c);
-}
-
-/* vec_vmhaddshs */
-static __inline__ vector signed short __attribute__((__always_inline__))
-vec_vmhaddshs(vector signed short __a, vector signed short __b,
-              vector signed short __c) {
-  return __builtin_altivec_vmhaddshs(__a, __b, __c);
-}
-
-/* vec_msub */
-
-#ifdef __VSX__
-static __inline__ vector float __ATTRS_o_ai vec_msub(vector float __a,
-                                                     vector float __b,
-                                                     vector float __c) {
-  return __builtin_vsx_xvmsubasp(__a, __b, __c);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_msub(vector double __a,
-                                                      vector double __b,
-                                                      vector double __c) {
-  return __builtin_vsx_xvmsubadp(__a, __b, __c);
-}
-#endif
-
-/* vec_max */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_max(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vmaxsb(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_max(vector bool char __a, vector signed char __b) {
-  return __builtin_altivec_vmaxsb((vector signed char)__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_max(vector signed char __a, vector bool char __b) {
-  return __builtin_altivec_vmaxsb(__a, (vector signed char)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_max(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vmaxub(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_max(vector bool char __a, vector unsigned char __b) {
-  return __builtin_altivec_vmaxub((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_max(vector unsigned char __a, vector bool char __b) {
-  return __builtin_altivec_vmaxub(__a, (vector unsigned char)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_max(vector short __a,
-                                                    vector short __b) {
-  return __builtin_altivec_vmaxsh(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_max(vector bool short __a,
-                                                    vector short __b) {
-  return __builtin_altivec_vmaxsh((vector short)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_max(vector short __a,
-                                                    vector bool short __b) {
-  return __builtin_altivec_vmaxsh(__a, (vector short)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_max(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vmaxuh(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_max(vector bool short __a, vector unsigned short __b) {
-  return __builtin_altivec_vmaxuh((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_max(vector unsigned short __a, vector bool short __b) {
-  return __builtin_altivec_vmaxuh(__a, (vector unsigned short)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_max(vector int __a,
-                                                  vector int __b) {
-  return __builtin_altivec_vmaxsw(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_max(vector bool int __a,
-                                                  vector int __b) {
-  return __builtin_altivec_vmaxsw((vector int)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_max(vector int __a,
-                                                  vector bool int __b) {
-  return __builtin_altivec_vmaxsw(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_max(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vmaxuw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_max(vector bool int __a, vector unsigned int __b) {
-  return __builtin_altivec_vmaxuw((vector unsigned int)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_max(vector unsigned int __a, vector bool int __b) {
-  return __builtin_altivec_vmaxuw(__a, (vector unsigned int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_max(vector signed long long __a, vector signed long long __b) {
-  return __builtin_altivec_vmaxsd(__a, __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_max(vector bool long long __a, vector signed long long __b) {
-  return __builtin_altivec_vmaxsd((vector signed long long)__a, __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_max(vector signed long long __a, vector bool long long __b) {
-  return __builtin_altivec_vmaxsd(__a, (vector signed long long)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_max(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vmaxud(__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_max(vector bool long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vmaxud((vector unsigned long long)__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_max(vector unsigned long long __a, vector bool long long __b) {
-  return __builtin_altivec_vmaxud(__a, (vector unsigned long long)__b);
-}
-#endif
-
-static __inline__ vector float __ATTRS_o_ai vec_max(vector float __a,
-                                                    vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvmaxsp(__a, __b);
-#else
-  return __builtin_altivec_vmaxfp(__a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_max(vector double __a,
-                                                     vector double __b) {
-  return __builtin_vsx_xvmaxdp(__a, __b);
-}
-#endif
-
-/* vec_vmaxsb */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vmaxsb(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vmaxsb(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vmaxsb(vector bool char __a, vector signed char __b) {
-  return __builtin_altivec_vmaxsb((vector signed char)__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vmaxsb(vector signed char __a, vector bool char __b) {
-  return __builtin_altivec_vmaxsb(__a, (vector signed char)__b);
-}
-
-/* vec_vmaxub */
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vmaxub(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vmaxub(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vmaxub(vector bool char __a, vector unsigned char __b) {
-  return __builtin_altivec_vmaxub((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vmaxub(vector unsigned char __a, vector bool char __b) {
-  return __builtin_altivec_vmaxub(__a, (vector unsigned char)__b);
-}
-
-/* vec_vmaxsh */
-
-static __inline__ vector short __ATTRS_o_ai vec_vmaxsh(vector short __a,
-                                                       vector short __b) {
-  return __builtin_altivec_vmaxsh(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vmaxsh(vector bool short __a,
-                                                       vector short __b) {
-  return __builtin_altivec_vmaxsh((vector short)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vmaxsh(vector short __a,
-                                                       vector bool short __b) {
-  return __builtin_altivec_vmaxsh(__a, (vector short)__b);
-}
-
-/* vec_vmaxuh */
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vmaxuh(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vmaxuh(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vmaxuh(vector bool short __a, vector unsigned short __b) {
-  return __builtin_altivec_vmaxuh((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vmaxuh(vector unsigned short __a, vector bool short __b) {
-  return __builtin_altivec_vmaxuh(__a, (vector unsigned short)__b);
-}
-
-/* vec_vmaxsw */
-
-static __inline__ vector int __ATTRS_o_ai vec_vmaxsw(vector int __a,
-                                                     vector int __b) {
-  return __builtin_altivec_vmaxsw(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vmaxsw(vector bool int __a,
-                                                     vector int __b) {
-  return __builtin_altivec_vmaxsw((vector int)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vmaxsw(vector int __a,
-                                                     vector bool int __b) {
-  return __builtin_altivec_vmaxsw(__a, (vector int)__b);
-}
-
-/* vec_vmaxuw */
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vmaxuw(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vmaxuw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vmaxuw(vector bool int __a, vector unsigned int __b) {
-  return __builtin_altivec_vmaxuw((vector unsigned int)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vmaxuw(vector unsigned int __a, vector bool int __b) {
-  return __builtin_altivec_vmaxuw(__a, (vector unsigned int)__b);
-}
-
-/* vec_vmaxfp */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vmaxfp(vector float __a, vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvmaxsp(__a, __b);
-#else
-  return __builtin_altivec_vmaxfp(__a, __b);
-#endif
-}
-
-/* vec_mergeh */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_mergeh(vector signed char __a, vector signed char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
-                                         0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
-                                         0x06, 0x16, 0x07, 0x17));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_mergeh(vector unsigned char __a, vector unsigned char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
-                                         0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
-                                         0x06, 0x16, 0x07, 0x17));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_mergeh(vector bool char __a, vector bool char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
-                                         0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
-                                         0x06, 0x16, 0x07, 0x17));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_mergeh(vector short __a,
-                                                       vector short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
-                                         0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
-                                         0x06, 0x07, 0x16, 0x17));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_mergeh(vector unsigned short __a, vector unsigned short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
-                                         0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
-                                         0x06, 0x07, 0x16, 0x17));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_mergeh(vector bool short __a, vector bool short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
-                                         0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
-                                         0x06, 0x07, 0x16, 0x17));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_mergeh(vector pixel __a,
-                                                       vector pixel __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
-                                         0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
-                                         0x06, 0x07, 0x16, 0x17));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_mergeh(vector int __a,
-                                                     vector int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_mergeh(vector unsigned int __a, vector unsigned int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_mergeh(vector bool int __a,
-                                                          vector bool int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_mergeh(vector float __a,
-                                                       vector float __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mergeh(vector signed long long __a, vector signed long long __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mergeh(vector signed long long __a, vector bool long long __b) {
-  return vec_perm(__a, (vector signed long long)__b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mergeh(vector bool long long __a, vector signed long long __b) {
-  return vec_perm((vector signed long long)__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mergeh(vector unsigned long long __a, vector unsigned long long __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mergeh(vector unsigned long long __a, vector bool long long __b) {
-  return vec_perm(__a, (vector unsigned long long)__b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mergeh(vector bool long long __a, vector unsigned long long __b) {
-  return vec_perm((vector unsigned long long)__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_mergeh(vector bool long long __a, vector bool long long __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_mergeh(vector double __a,
-                                                        vector double __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-static __inline__ vector double __ATTRS_o_ai
-vec_mergeh(vector double __a, vector bool long long __b) {
-  return vec_perm(__a, (vector double)__b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-static __inline__ vector double __ATTRS_o_ai
-vec_mergeh(vector bool long long __a, vector double __b) {
-  return vec_perm((vector double)__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-#endif
-
-/* vec_vmrghb */
-
-#define __builtin_altivec_vmrghb vec_vmrghb
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vmrghb(vector signed char __a, vector signed char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
-                                         0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
-                                         0x06, 0x16, 0x07, 0x17));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vmrghb(vector unsigned char __a, vector unsigned char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
-                                         0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
-                                         0x06, 0x16, 0x07, 0x17));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vmrghb(vector bool char __a, vector bool char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
-                                         0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
-                                         0x06, 0x16, 0x07, 0x17));
-}
-
-/* vec_vmrghh */
-
-#define __builtin_altivec_vmrghh vec_vmrghh
-
-static __inline__ vector short __ATTRS_o_ai vec_vmrghh(vector short __a,
-                                                       vector short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
-                                         0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
-                                         0x06, 0x07, 0x16, 0x17));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vmrghh(vector unsigned short __a, vector unsigned short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
-                                         0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
-                                         0x06, 0x07, 0x16, 0x17));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vmrghh(vector bool short __a, vector bool short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
-                                         0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
-                                         0x06, 0x07, 0x16, 0x17));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vmrghh(vector pixel __a,
-                                                       vector pixel __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
-                                         0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
-                                         0x06, 0x07, 0x16, 0x17));
-}
-
-/* vec_vmrghw */
-
-#define __builtin_altivec_vmrghw vec_vmrghw
-
-static __inline__ vector int __ATTRS_o_ai vec_vmrghw(vector int __a,
-                                                     vector int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vmrghw(vector unsigned int __a, vector unsigned int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vmrghw(vector bool int __a,
-                                                          vector bool int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vmrghw(vector float __a,
-                                                       vector float __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-/* vec_mergel */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_mergel(vector signed char __a, vector signed char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
-                                         0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
-                                         0x0E, 0x1E, 0x0F, 0x1F));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_mergel(vector unsigned char __a, vector unsigned char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
-                                         0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
-                                         0x0E, 0x1E, 0x0F, 0x1F));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_mergel(vector bool char __a, vector bool char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
-                                         0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
-                                         0x0E, 0x1E, 0x0F, 0x1F));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_mergel(vector short __a,
-                                                       vector short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
-                                         0x0E, 0x0F, 0x1E, 0x1F));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_mergel(vector unsigned short __a, vector unsigned short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
-                                         0x0E, 0x0F, 0x1E, 0x1F));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_mergel(vector bool short __a, vector bool short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
-                                         0x0E, 0x0F, 0x1E, 0x1F));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_mergel(vector pixel __a,
-                                                       vector pixel __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
-                                         0x0E, 0x0F, 0x1E, 0x1F));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_mergel(vector int __a,
-                                                     vector int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_mergel(vector unsigned int __a, vector unsigned int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_mergel(vector bool int __a,
-                                                          vector bool int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_mergel(vector float __a,
-                                                       vector float __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mergel(vector signed long long __a, vector signed long long __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mergel(vector signed long long __a, vector bool long long __b) {
-  return vec_perm(__a, (vector signed long long)__b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mergel(vector bool long long __a, vector signed long long __b) {
-  return vec_perm((vector signed long long)__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mergel(vector unsigned long long __a, vector unsigned long long __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mergel(vector unsigned long long __a, vector bool long long __b) {
-  return vec_perm(__a, (vector unsigned long long)__b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mergel(vector bool long long __a, vector unsigned long long __b) {
-  return vec_perm((vector unsigned long long)__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_mergel(vector bool long long __a, vector bool long long __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector double __ATTRS_o_ai vec_mergel(vector double __a,
-                                                        vector double __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector double __ATTRS_o_ai
-vec_mergel(vector double __a, vector bool long long __b) {
-  return vec_perm(__a, (vector double)__b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector double __ATTRS_o_ai
-vec_mergel(vector bool long long __a, vector double __b) {
-  return vec_perm((vector double)__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-#endif
-
-/* vec_vmrglb */
-
-#define __builtin_altivec_vmrglb vec_vmrglb
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vmrglb(vector signed char __a, vector signed char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
-                                         0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
-                                         0x0E, 0x1E, 0x0F, 0x1F));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vmrglb(vector unsigned char __a, vector unsigned char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
-                                         0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
-                                         0x0E, 0x1E, 0x0F, 0x1F));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vmrglb(vector bool char __a, vector bool char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
-                                         0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
-                                         0x0E, 0x1E, 0x0F, 0x1F));
-}
-
-/* vec_vmrglh */
-
-#define __builtin_altivec_vmrglh vec_vmrglh
-
-static __inline__ vector short __ATTRS_o_ai vec_vmrglh(vector short __a,
-                                                       vector short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
-                                         0x0E, 0x0F, 0x1E, 0x1F));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vmrglh(vector unsigned short __a, vector unsigned short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
-                                         0x0E, 0x0F, 0x1E, 0x1F));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vmrglh(vector bool short __a, vector bool short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
-                                         0x0E, 0x0F, 0x1E, 0x1F));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vmrglh(vector pixel __a,
-                                                       vector pixel __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
-                                         0x0E, 0x0F, 0x1E, 0x1F));
-}
-
-/* vec_vmrglw */
-
-#define __builtin_altivec_vmrglw vec_vmrglw
-
-static __inline__ vector int __ATTRS_o_ai vec_vmrglw(vector int __a,
-                                                     vector int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vmrglw(vector unsigned int __a, vector unsigned int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vmrglw(vector bool int __a,
-                                                          vector bool int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vmrglw(vector float __a,
-                                                       vector float __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-#ifdef __POWER8_VECTOR__
-/* vec_mergee */
-
-static __inline__ vector bool int __ATTRS_o_ai vec_mergee(vector bool int __a,
-                                                          vector bool int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x08, 0x09, 0x0A, 0x0B,
-                                         0x18, 0x19, 0x1A, 0x1B));
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_mergee(vector signed int __a, vector signed int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x08, 0x09, 0x0A, 0x0B,
-                                         0x18, 0x19, 0x1A, 0x1B));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_mergee(vector unsigned int __a, vector unsigned int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x08, 0x09, 0x0A, 0x0B,
-                                         0x18, 0x19, 0x1A, 0x1B));
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_mergee(vector bool long long __a, vector bool long long __b) {
-  return vec_mergeh(__a, __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mergee(vector signed long long __a, vector signed long long __b) {
-  return vec_mergeh(__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mergee(vector unsigned long long __a, vector unsigned long long __b) {
-  return vec_mergeh(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_mergee(vector float __a, vector float __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x08, 0x09, 0x0A, 0x0B,
-                                         0x18, 0x19, 0x1A, 0x1B));
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_mergee(vector double __a, vector double __b) {
-  return vec_mergeh(__a, __b);
-}
-
-/* vec_mergeo */
-
-static __inline__ vector bool int __ATTRS_o_ai vec_mergeo(vector bool int __a,
-                                                          vector bool int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x14, 0x15,
-                                         0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_mergeo(vector signed int __a, vector signed int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x14, 0x15,
-                                         0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_mergeo(vector unsigned int __a, vector unsigned int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x14, 0x15,
-                                         0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_mergeo(vector bool long long __a, vector bool long long __b) {
-  return vec_mergel(__a, __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mergeo(vector signed long long __a, vector signed long long __b) {
-  return vec_mergel(__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mergeo(vector unsigned long long __a, vector unsigned long long __b) {
-  return vec_mergel(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_mergeo(vector float __a, vector float __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x14, 0x15,
-                                         0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_mergeo(vector double __a, vector double __b) {
-  return vec_mergel(__a, __b);
-}
-
-#endif
-
-/* vec_mfvscr */
-
-static __inline__ vector unsigned short __attribute__((__always_inline__))
-vec_mfvscr(void) {
-  return __builtin_altivec_mfvscr();
-}
-
-/* vec_min */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_min(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vminsb(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_min(vector bool char __a, vector signed char __b) {
-  return __builtin_altivec_vminsb((vector signed char)__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_min(vector signed char __a, vector bool char __b) {
-  return __builtin_altivec_vminsb(__a, (vector signed char)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_min(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vminub(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_min(vector bool char __a, vector unsigned char __b) {
-  return __builtin_altivec_vminub((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_min(vector unsigned char __a, vector bool char __b) {
-  return __builtin_altivec_vminub(__a, (vector unsigned char)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_min(vector short __a,
-                                                    vector short __b) {
-  return __builtin_altivec_vminsh(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_min(vector bool short __a,
-                                                    vector short __b) {
-  return __builtin_altivec_vminsh((vector short)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_min(vector short __a,
-                                                    vector bool short __b) {
-  return __builtin_altivec_vminsh(__a, (vector short)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_min(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vminuh(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_min(vector bool short __a, vector unsigned short __b) {
-  return __builtin_altivec_vminuh((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_min(vector unsigned short __a, vector bool short __b) {
-  return __builtin_altivec_vminuh(__a, (vector unsigned short)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_min(vector int __a,
-                                                  vector int __b) {
-  return __builtin_altivec_vminsw(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_min(vector bool int __a,
-                                                  vector int __b) {
-  return __builtin_altivec_vminsw((vector int)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_min(vector int __a,
-                                                  vector bool int __b) {
-  return __builtin_altivec_vminsw(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_min(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vminuw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_min(vector bool int __a, vector unsigned int __b) {
-  return __builtin_altivec_vminuw((vector unsigned int)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_min(vector unsigned int __a, vector bool int __b) {
-  return __builtin_altivec_vminuw(__a, (vector unsigned int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_min(vector signed long long __a, vector signed long long __b) {
-  return __builtin_altivec_vminsd(__a, __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_min(vector bool long long __a, vector signed long long __b) {
-  return __builtin_altivec_vminsd((vector signed long long)__a, __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_min(vector signed long long __a, vector bool long long __b) {
-  return __builtin_altivec_vminsd(__a, (vector signed long long)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_min(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vminud(__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_min(vector bool long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vminud((vector unsigned long long)__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_min(vector unsigned long long __a, vector bool long long __b) {
-  return __builtin_altivec_vminud(__a, (vector unsigned long long)__b);
-}
-#endif
-
-static __inline__ vector float __ATTRS_o_ai vec_min(vector float __a,
-                                                    vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvminsp(__a, __b);
-#else
-  return __builtin_altivec_vminfp(__a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_min(vector double __a,
-                                                     vector double __b) {
-  return __builtin_vsx_xvmindp(__a, __b);
-}
-#endif
-
-/* vec_vminsb */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vminsb(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vminsb(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vminsb(vector bool char __a, vector signed char __b) {
-  return __builtin_altivec_vminsb((vector signed char)__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vminsb(vector signed char __a, vector bool char __b) {
-  return __builtin_altivec_vminsb(__a, (vector signed char)__b);
-}
-
-/* vec_vminub */
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vminub(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vminub(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vminub(vector bool char __a, vector unsigned char __b) {
-  return __builtin_altivec_vminub((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vminub(vector unsigned char __a, vector bool char __b) {
-  return __builtin_altivec_vminub(__a, (vector unsigned char)__b);
-}
-
-/* vec_vminsh */
-
-static __inline__ vector short __ATTRS_o_ai vec_vminsh(vector short __a,
-                                                       vector short __b) {
-  return __builtin_altivec_vminsh(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vminsh(vector bool short __a,
-                                                       vector short __b) {
-  return __builtin_altivec_vminsh((vector short)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vminsh(vector short __a,
-                                                       vector bool short __b) {
-  return __builtin_altivec_vminsh(__a, (vector short)__b);
-}
-
-/* vec_vminuh */
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vminuh(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vminuh(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vminuh(vector bool short __a, vector unsigned short __b) {
-  return __builtin_altivec_vminuh((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vminuh(vector unsigned short __a, vector bool short __b) {
-  return __builtin_altivec_vminuh(__a, (vector unsigned short)__b);
-}
-
-/* vec_vminsw */
-
-static __inline__ vector int __ATTRS_o_ai vec_vminsw(vector int __a,
-                                                     vector int __b) {
-  return __builtin_altivec_vminsw(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vminsw(vector bool int __a,
-                                                     vector int __b) {
-  return __builtin_altivec_vminsw((vector int)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vminsw(vector int __a,
-                                                     vector bool int __b) {
-  return __builtin_altivec_vminsw(__a, (vector int)__b);
-}
-
-/* vec_vminuw */
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vminuw(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vminuw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vminuw(vector bool int __a, vector unsigned int __b) {
-  return __builtin_altivec_vminuw((vector unsigned int)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vminuw(vector unsigned int __a, vector bool int __b) {
-  return __builtin_altivec_vminuw(__a, (vector unsigned int)__b);
-}
-
-/* vec_vminfp */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vminfp(vector float __a, vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvminsp(__a, __b);
-#else
-  return __builtin_altivec_vminfp(__a, __b);
-#endif
-}
-
-/* vec_mladd */
-
-#define __builtin_altivec_vmladduhm vec_mladd
-
-static __inline__ vector short __ATTRS_o_ai vec_mladd(vector short __a,
-                                                      vector short __b,
-                                                      vector short __c) {
-  return __a * __b + __c;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_mladd(
-    vector short __a, vector unsigned short __b, vector unsigned short __c) {
-  return __a * (vector short)__b + (vector short)__c;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_mladd(vector unsigned short __a,
-                                                      vector short __b,
-                                                      vector short __c) {
-  return (vector short)__a * __b + __c;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_mladd(vector unsigned short __a, vector unsigned short __b,
-          vector unsigned short __c) {
-  return __a * __b + __c;
-}
-
-/* vec_vmladduhm */
-
-static __inline__ vector short __ATTRS_o_ai vec_vmladduhm(vector short __a,
-                                                          vector short __b,
-                                                          vector short __c) {
-  return __a * __b + __c;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vmladduhm(
-    vector short __a, vector unsigned short __b, vector unsigned short __c) {
-  return __a * (vector short)__b + (vector short)__c;
-}
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vmladduhm(vector unsigned short __a, vector short __b, vector short __c) {
-  return (vector short)__a * __b + __c;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vmladduhm(vector unsigned short __a, vector unsigned short __b,
-              vector unsigned short __c) {
-  return __a * __b + __c;
-}
-
-/* vec_mradds */
-
-static __inline__ vector short __attribute__((__always_inline__))
-vec_mradds(vector short __a, vector short __b, vector short __c) {
-  return __builtin_altivec_vmhraddshs(__a, __b, __c);
-}
-
-/* vec_vmhraddshs */
-
-static __inline__ vector short __attribute__((__always_inline__))
-vec_vmhraddshs(vector short __a, vector short __b, vector short __c) {
-  return __builtin_altivec_vmhraddshs(__a, __b, __c);
-}
-
-/* vec_msum */
-
-static __inline__ vector int __ATTRS_o_ai vec_msum(vector signed char __a,
-                                                   vector unsigned char __b,
-                                                   vector int __c) {
-  return __builtin_altivec_vmsummbm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_msum(vector unsigned char __a, vector unsigned char __b,
-         vector unsigned int __c) {
-  return __builtin_altivec_vmsumubm(__a, __b, __c);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_msum(vector short __a,
-                                                   vector short __b,
-                                                   vector int __c) {
-  return __builtin_altivec_vmsumshm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_msum(vector unsigned short __a, vector unsigned short __b,
-         vector unsigned int __c) {
-  return __builtin_altivec_vmsumuhm(__a, __b, __c);
-}
-
-/* vec_vmsummbm */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vmsummbm(vector signed char __a, vector unsigned char __b, vector int __c) {
-  return __builtin_altivec_vmsummbm(__a, __b, __c);
-}
-
-/* vec_vmsumubm */
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vmsumubm(vector unsigned char __a, vector unsigned char __b,
-             vector unsigned int __c) {
-  return __builtin_altivec_vmsumubm(__a, __b, __c);
-}
-
-/* vec_vmsumshm */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vmsumshm(vector short __a, vector short __b, vector int __c) {
-  return __builtin_altivec_vmsumshm(__a, __b, __c);
-}
-
-/* vec_vmsumuhm */
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vmsumuhm(vector unsigned short __a, vector unsigned short __b,
-             vector unsigned int __c) {
-  return __builtin_altivec_vmsumuhm(__a, __b, __c);
-}
-
-/* vec_msums */
-
-static __inline__ vector int __ATTRS_o_ai vec_msums(vector short __a,
-                                                    vector short __b,
-                                                    vector int __c) {
-  return __builtin_altivec_vmsumshs(__a, __b, __c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_msums(vector unsigned short __a, vector unsigned short __b,
-          vector unsigned int __c) {
-  return __builtin_altivec_vmsumuhs(__a, __b, __c);
-}
-
-/* vec_vmsumshs */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vmsumshs(vector short __a, vector short __b, vector int __c) {
-  return __builtin_altivec_vmsumshs(__a, __b, __c);
-}
-
-/* vec_vmsumuhs */
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vmsumuhs(vector unsigned short __a, vector unsigned short __b,
-             vector unsigned int __c) {
-  return __builtin_altivec_vmsumuhs(__a, __b, __c);
-}
-
-/* vec_mtvscr */
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector signed char __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector unsigned char __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector bool char __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector short __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector unsigned short __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector bool short __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector pixel __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector int __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector unsigned int __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector bool int __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector float __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-/* vec_mul */
-
-/* Integer vector multiplication will involve multiplication of the odd/even
-   elements separately, then truncating the results and moving to the
-   result vector.
-*/
-static __inline__ vector signed char __ATTRS_o_ai
-vec_mul(vector signed char __a, vector signed char __b) {
-  return __a * __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_mul(vector unsigned char __a, vector unsigned char __b) {
-  return __a * __b;
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_mul(vector signed short __a, vector signed short __b) {
-  return __a * __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_mul(vector unsigned short __a, vector unsigned short __b) {
-  return __a * __b;
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_mul(vector signed int __a, vector signed int __b) {
-  return __a * __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_mul(vector unsigned int __a, vector unsigned int __b) {
-  return __a * __b;
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mul(vector signed long long __a, vector signed long long __b) {
-  return __a * __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mul(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a * __b;
-}
-#endif
-
-static __inline__ vector float __ATTRS_o_ai vec_mul(vector float __a,
-                                                    vector float __b) {
-  return __a * __b;
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_mul(vector double __a,
-                                                     vector double __b) {
-  return __a * __b;
-}
-#endif
-
-/* The vmulos* and vmules* instructions have a big endian bias, so
-   we must reverse the meaning of "even" and "odd" for little endian.  */
-
-/* vec_mule */
-
-static __inline__ vector short __ATTRS_o_ai vec_mule(vector signed char __a,
-                                                     vector signed char __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulosb(__a, __b);
-#else
-  return __builtin_altivec_vmulesb(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_mule(vector unsigned char __a, vector unsigned char __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmuloub(__a, __b);
-#else
-  return __builtin_altivec_vmuleub(__a, __b);
-#endif
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_mule(vector short __a,
-                                                   vector short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulosh(__a, __b);
-#else
-  return __builtin_altivec_vmulesh(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_mule(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulouh(__a, __b);
-#else
-  return __builtin_altivec_vmuleuh(__a, __b);
-#endif
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mule(vector signed int __a, vector signed int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulosw(__a, __b);
-#else
-  return __builtin_altivec_vmulesw(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mule(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulouw(__a, __b);
-#else
-  return __builtin_altivec_vmuleuw(__a, __b);
-#endif
-}
-#endif
-
-/* vec_vmulesb */
-
-static __inline__ vector short __attribute__((__always_inline__))
-vec_vmulesb(vector signed char __a, vector signed char __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulosb(__a, __b);
-#else
-  return __builtin_altivec_vmulesb(__a, __b);
-#endif
-}
-
-/* vec_vmuleub */
-
-static __inline__ vector unsigned short __attribute__((__always_inline__))
-vec_vmuleub(vector unsigned char __a, vector unsigned char __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmuloub(__a, __b);
-#else
-  return __builtin_altivec_vmuleub(__a, __b);
-#endif
-}
-
-/* vec_vmulesh */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vmulesh(vector short __a, vector short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulosh(__a, __b);
-#else
-  return __builtin_altivec_vmulesh(__a, __b);
-#endif
-}
-
-/* vec_vmuleuh */
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vmuleuh(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulouh(__a, __b);
-#else
-  return __builtin_altivec_vmuleuh(__a, __b);
-#endif
-}
-
-/* vec_mulo */
-
-static __inline__ vector short __ATTRS_o_ai vec_mulo(vector signed char __a,
-                                                     vector signed char __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulesb(__a, __b);
-#else
-  return __builtin_altivec_vmulosb(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_mulo(vector unsigned char __a, vector unsigned char __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmuleub(__a, __b);
-#else
-  return __builtin_altivec_vmuloub(__a, __b);
-#endif
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_mulo(vector short __a,
-                                                   vector short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulesh(__a, __b);
-#else
-  return __builtin_altivec_vmulosh(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_mulo(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmuleuh(__a, __b);
-#else
-  return __builtin_altivec_vmulouh(__a, __b);
-#endif
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mulo(vector signed int __a, vector signed int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulesw(__a, __b);
-#else
-  return __builtin_altivec_vmulosw(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mulo(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmuleuw(__a, __b);
-#else
-  return __builtin_altivec_vmulouw(__a, __b);
-#endif
-}
-#endif
-
-/* vec_vmulosb */
-
-static __inline__ vector short __attribute__((__always_inline__))
-vec_vmulosb(vector signed char __a, vector signed char __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulesb(__a, __b);
-#else
-  return __builtin_altivec_vmulosb(__a, __b);
-#endif
-}
-
-/* vec_vmuloub */
-
-static __inline__ vector unsigned short __attribute__((__always_inline__))
-vec_vmuloub(vector unsigned char __a, vector unsigned char __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmuleub(__a, __b);
-#else
-  return __builtin_altivec_vmuloub(__a, __b);
-#endif
-}
-
-/* vec_vmulosh */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vmulosh(vector short __a, vector short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulesh(__a, __b);
-#else
-  return __builtin_altivec_vmulosh(__a, __b);
-#endif
-}
-
-/* vec_vmulouh */
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vmulouh(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmuleuh(__a, __b);
-#else
-  return __builtin_altivec_vmulouh(__a, __b);
-#endif
-}
-
-/*  vec_nand */
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed char __ATTRS_o_ai
-vec_nand(vector signed char __a, vector signed char __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_nand(vector signed char __a, vector bool char __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_nand(vector bool char __a, vector signed char __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_nand(vector unsigned char __a, vector unsigned char __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_nand(vector unsigned char __a, vector bool char __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_nand(vector bool char __a, vector unsigned char __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_nand(vector bool char __a,
-                                                         vector bool char __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_nand(vector signed short __a, vector signed short __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_nand(vector signed short __a, vector bool short __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_nand(vector bool short __a, vector signed short __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_nand(vector unsigned short __a, vector unsigned short __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_nand(vector unsigned short __a, vector bool short __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_nand(vector bool short __a, vector bool short __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_nand(vector signed int __a, vector signed int __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai vec_nand(vector signed int __a,
-                                                          vector bool int __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_nand(vector bool int __a, vector signed int __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_nand(vector unsigned int __a, vector unsigned int __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_nand(vector unsigned int __a, vector bool int __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_nand(vector bool int __a, vector unsigned int __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_nand(vector bool int __a,
-                                                        vector bool int __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_nand(vector float __a, vector float __b) {
-  return (vector float)(~((vector unsigned int)__a &
-                          (vector unsigned int)__b));
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_nand(vector signed long long __a, vector signed long long __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_nand(vector signed long long __a, vector bool long long __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_nand(vector bool long long __a, vector signed long long __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_nand(vector unsigned long long __a, vector unsigned long long __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_nand(vector unsigned long long __a, vector bool long long __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_nand(vector bool long long __a, vector unsigned long long __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_nand(vector bool long long __a, vector bool long long __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_nand(vector double __a, vector double __b) {
-  return (vector double)(~((vector unsigned long long)__a &
-                           (vector unsigned long long)__b));
-}
-
-#endif
-
-/* vec_nmadd */
-
-#ifdef __VSX__
-static __inline__ vector float __ATTRS_o_ai vec_nmadd(vector float __a,
-                                                      vector float __b,
-                                                      vector float __c) {
-  return __builtin_vsx_xvnmaddasp(__a, __b, __c);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_nmadd(vector double __a,
-                                                       vector double __b,
-                                                       vector double __c) {
-  return __builtin_vsx_xvnmaddadp(__a, __b, __c);
-}
-#endif
-
-/* vec_nmsub */
-
-static __inline__ vector float __ATTRS_o_ai vec_nmsub(vector float __a,
-                                                      vector float __b,
-                                                      vector float __c) {
-#ifdef __VSX__
-  return __builtin_vsx_xvnmsubasp(__a, __b, __c);
-#else
-  return __builtin_altivec_vnmsubfp(__a, __b, __c);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_nmsub(vector double __a,
-                                                       vector double __b,
-                                                       vector double __c) {
-  return __builtin_vsx_xvnmsubadp(__a, __b, __c);
-}
-#endif
-
-/* vec_vnmsubfp */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vnmsubfp(vector float __a, vector float __b, vector float __c) {
-  return __builtin_altivec_vnmsubfp(__a, __b, __c);
-}
-
-/* vec_nor */
-
-#define __builtin_altivec_vnor vec_nor
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_nor(vector signed char __a, vector signed char __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_nor(vector unsigned char __a, vector unsigned char __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_nor(vector bool char __a,
-                                                        vector bool char __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_nor(vector short __a,
-                                                    vector short __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_nor(vector unsigned short __a, vector unsigned short __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_nor(vector bool short __a, vector bool short __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_nor(vector int __a,
-                                                  vector int __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_nor(vector unsigned int __a, vector unsigned int __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_nor(vector bool int __a,
-                                                       vector bool int __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_nor(vector float __a,
-                                                    vector float __b) {
-  vector unsigned int __res =
-      ~((vector unsigned int)__a | (vector unsigned int)__b);
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_nor(vector double __a,
-                                                     vector double __b) {
-  vector unsigned long long __res =
-      ~((vector unsigned long long)__a | (vector unsigned long long)__b);
-  return (vector double)__res;
-}
-#endif
-
-/* vec_vnor */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vnor(vector signed char __a, vector signed char __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vnor(vector unsigned char __a, vector unsigned char __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_vnor(vector bool char __a,
-                                                         vector bool char __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vnor(vector short __a,
-                                                     vector short __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vnor(vector unsigned short __a, vector unsigned short __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vnor(vector bool short __a, vector bool short __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vnor(vector int __a,
-                                                   vector int __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vnor(vector unsigned int __a, vector unsigned int __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vnor(vector bool int __a,
-                                                        vector bool int __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vnor(vector float __a,
-                                                     vector float __b) {
-  vector unsigned int __res =
-      ~((vector unsigned int)__a | (vector unsigned int)__b);
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_nor(vector signed long long __a, vector signed long long __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_nor(vector unsigned long long __a, vector unsigned long long __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_nor(vector bool long long __a, vector bool long long __b) {
-  return ~(__a | __b);
-}
-#endif
-
-/* vec_or */
-
-#define __builtin_altivec_vor vec_or
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_or(vector signed char __a, vector signed char __b) {
-  return __a | __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_or(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a | __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai vec_or(vector signed char __a,
-                                                         vector bool char __b) {
-  return __a | (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_or(vector unsigned char __a, vector unsigned char __b) {
-  return __a | __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_or(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a | __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_or(vector unsigned char __a, vector bool char __b) {
-  return __a | (vector unsigned char)__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_or(vector bool char __a,
-                                                       vector bool char __b) {
-  return __a | __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_or(vector short __a,
-                                                   vector short __b) {
-  return __a | __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_or(vector bool short __a,
-                                                   vector short __b) {
-  return (vector short)__a | __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_or(vector short __a,
-                                                   vector bool short __b) {
-  return __a | (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_or(vector unsigned short __a, vector unsigned short __b) {
-  return __a | __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_or(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a | __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_or(vector unsigned short __a, vector bool short __b) {
-  return __a | (vector unsigned short)__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_or(vector bool short __a,
-                                                        vector bool short __b) {
-  return __a | __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_or(vector int __a,
-                                                 vector int __b) {
-  return __a | __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_or(vector bool int __a,
-                                                 vector int __b) {
-  return (vector int)__a | __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_or(vector int __a,
-                                                 vector bool int __b) {
-  return __a | (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_or(vector unsigned int __a, vector unsigned int __b) {
-  return __a | __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_or(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a | __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_or(vector unsigned int __a, vector bool int __b) {
-  return __a | (vector unsigned int)__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_or(vector bool int __a,
-                                                      vector bool int __b) {
-  return __a | __b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_or(vector float __a,
-                                                   vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a | (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_or(vector bool int __a,
-                                                   vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a | (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_or(vector float __a,
-                                                   vector bool int __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a | (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_or(vector bool long long __a,
-                                                    vector double __b) {
-  return (vector unsigned long long)__a | (vector unsigned long long)__b;
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_or(vector double __a,
-                                                    vector bool long long __b) {
-  return (vector unsigned long long)__a | (vector unsigned long long)__b;
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_or(vector double __a,
-                                                    vector double __b) {
-  vector unsigned long long __res =
-      (vector unsigned long long)__a | (vector unsigned long long)__b;
-  return (vector double)__res;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_or(vector signed long long __a, vector signed long long __b) {
-  return __a | __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_or(vector bool long long __a, vector signed long long __b) {
-  return (vector signed long long)__a | __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_or(vector signed long long __a, vector bool long long __b) {
-  return __a | (vector signed long long)__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_or(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a | __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_or(vector bool long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__a | __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_or(vector unsigned long long __a, vector bool long long __b) {
-  return __a | (vector unsigned long long)__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_or(vector bool long long __a, vector bool long long __b) {
-  return __a | __b;
-}
-#endif
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed char __ATTRS_o_ai
-vec_orc(vector signed char __a, vector signed char __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_orc(vector signed char __a, vector bool char __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_orc(vector bool char __a, vector signed char __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_orc(vector unsigned char __a, vector unsigned char __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_orc(vector unsigned char __a, vector bool char __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_orc(vector bool char __a, vector unsigned char __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_orc(vector bool char __a,
-                                                        vector bool char __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_orc(vector signed short __a, vector signed short __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_orc(vector signed short __a, vector bool short __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_orc(vector bool short __a, vector signed short __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_orc(vector unsigned short __a, vector unsigned short __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_orc(vector unsigned short __a, vector bool short __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_orc(vector bool short __a, vector unsigned short __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_orc(vector bool short __a, vector bool short __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_orc(vector signed int __a, vector signed int __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed int __ATTRS_o_ai vec_orc(vector signed int __a,
-                                                         vector bool int __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_orc(vector bool int __a, vector signed int __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_orc(vector unsigned int __a, vector unsigned int __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_orc(vector unsigned int __a, vector bool int __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_orc(vector bool int __a, vector unsigned int __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_orc(vector bool int __a,
-                                                       vector bool int __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_orc(vector bool int __a, vector float __b) {
- return (vector float)(__a | ~(vector unsigned int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_orc(vector float __a, vector bool int __b) {
-  return (vector float)((vector unsigned int)__a | ~__b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_orc(vector signed long long __a, vector signed long long __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_orc(vector signed long long __a, vector bool long long __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_orc(vector bool long long __a, vector signed long long __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_orc(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_orc(vector unsigned long long __a, vector bool long long __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_orc(vector bool long long __a, vector unsigned long long __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_orc(vector bool long long __a, vector bool long long __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_orc(vector double __a, vector bool long long __b) {
-  return (vector double)((vector unsigned long long)__a | ~__b);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_orc(vector bool long long __a, vector double __b) {
-  return (vector double)(__a | ~(vector unsigned long long)__b);
-}
-#endif
-
-/* vec_vor */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vor(vector signed char __a, vector signed char __b) {
-  return __a | __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vor(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a | __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vor(vector signed char __a, vector bool char __b) {
-  return __a | (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vor(vector unsigned char __a, vector unsigned char __b) {
-  return __a | __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vor(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a | __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vor(vector unsigned char __a, vector bool char __b) {
-  return __a | (vector unsigned char)__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_vor(vector bool char __a,
-                                                        vector bool char __b) {
-  return __a | __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vor(vector short __a,
-                                                    vector short __b) {
-  return __a | __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vor(vector bool short __a,
-                                                    vector short __b) {
-  return (vector short)__a | __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vor(vector short __a,
-                                                    vector bool short __b) {
-  return __a | (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vor(vector unsigned short __a, vector unsigned short __b) {
-  return __a | __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vor(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a | __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vor(vector unsigned short __a, vector bool short __b) {
-  return __a | (vector unsigned short)__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vor(vector bool short __a, vector bool short __b) {
-  return __a | __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vor(vector int __a,
-                                                  vector int __b) {
-  return __a | __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vor(vector bool int __a,
-                                                  vector int __b) {
-  return (vector int)__a | __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vor(vector int __a,
-                                                  vector bool int __b) {
-  return __a | (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vor(vector unsigned int __a, vector unsigned int __b) {
-  return __a | __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vor(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a | __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vor(vector unsigned int __a, vector bool int __b) {
-  return __a | (vector unsigned int)__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vor(vector bool int __a,
-                                                       vector bool int __b) {
-  return __a | __b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vor(vector float __a,
-                                                    vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a | (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vor(vector bool int __a,
-                                                    vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a | (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vor(vector float __a,
-                                                    vector bool int __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a | (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vor(vector signed long long __a, vector signed long long __b) {
-  return __a | __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vor(vector bool long long __a, vector signed long long __b) {
-  return (vector signed long long)__a | __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vor(vector signed long long __a, vector bool long long __b) {
-  return __a | (vector signed long long)__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vor(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a | __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vor(vector bool long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__a | __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vor(vector unsigned long long __a, vector bool long long __b) {
-  return __a | (vector unsigned long long)__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_vor(vector bool long long __a, vector bool long long __b) {
-  return __a | __b;
-}
-#endif
-
-/* vec_pack */
-
-/* The various vector pack instructions have a big-endian bias, so for
-   little endian we must handle reversed element numbering.  */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_pack(vector signed short __a, vector signed short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector signed char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
-                             0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
-#else
-  return (vector signed char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
-                             0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_pack(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
-                             0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
-#else
-  return (vector unsigned char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
-                             0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
-#endif
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_pack(vector bool short __a, vector bool short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
-                             0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
-#else
-  return (vector bool char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
-                             0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
-#endif
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_pack(vector int __a,
-                                                     vector int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
-                             0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
-#else
-  return (vector short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
-                             0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_pack(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
-                             0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
-#else
-  return (vector unsigned short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
-                             0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
-#endif
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_pack(vector bool int __a,
-                                                          vector bool int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
-                             0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
-#else
-  return (vector bool short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
-                             0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector signed int __ATTRS_o_ai
-vec_pack(vector signed long long __a, vector signed long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector signed int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
-                             0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
-#else
-  return (vector signed int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
-                             0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
-#endif
-}
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_pack(vector unsigned long long __a, vector unsigned long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
-                             0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
-#else
-  return (vector unsigned int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
-                             0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
-#endif
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_pack(vector bool long long __a, vector bool long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
-                             0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
-#else
-  return (vector bool int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
-                             0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_pack(vector double __a, vector double __b) {
-  return (vector float) (__a[0], __a[1], __b[0], __b[1]);
-}
-#endif
-
-#ifdef __POWER9_VECTOR__
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_pack_to_short_fp32(vector float __a, vector float __b) {
-  vector float __resa = __builtin_vsx_xvcvsphp(__a);
-  vector float __resb = __builtin_vsx_xvcvsphp(__b);
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned short)vec_mergee(__resa, __resb);
-#else
-  return (vector unsigned short)vec_mergeo(__resa, __resb);
-#endif
-}
-
-#endif
-/* vec_vpkuhum */
-
-#define __builtin_altivec_vpkuhum vec_vpkuhum
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vpkuhum(vector signed short __a, vector signed short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector signed char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
-                             0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
-#else
-  return (vector signed char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
-                             0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vpkuhum(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
-                             0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
-#else
-  return (vector unsigned char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
-                             0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
-#endif
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vpkuhum(vector bool short __a, vector bool short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
-                             0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
-#else
-  return (vector bool char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
-                             0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
-#endif
-}
-
-/* vec_vpkuwum */
-
-#define __builtin_altivec_vpkuwum vec_vpkuwum
-
-static __inline__ vector short __ATTRS_o_ai vec_vpkuwum(vector int __a,
-                                                        vector int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
-                             0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
-#else
-  return (vector short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
-                             0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vpkuwum(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
-                             0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
-#else
-  return (vector unsigned short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
-                             0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
-#endif
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vpkuwum(vector bool int __a, vector bool int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
-                             0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
-#else
-  return (vector bool short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
-                             0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
-#endif
-}
-
-/* vec_vpkudum */
-
-#ifdef __POWER8_VECTOR__
-#define __builtin_altivec_vpkudum vec_vpkudum
-
-static __inline__ vector int __ATTRS_o_ai vec_vpkudum(vector long long __a,
-                                                      vector long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
-                             0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
-#else
-  return (vector int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
-                             0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vpkudum(vector unsigned long long __a, vector unsigned long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
-                             0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
-#else
-  return (vector unsigned int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
-                             0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
-#endif
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vpkudum(vector bool long long __a, vector bool long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool int)vec_perm(
-      (vector long long)__a, (vector long long)__b,
-      (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
-                             0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
-#else
-  return (vector bool int)vec_perm(
-      (vector long long)__a, (vector long long)__b,
-      (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
-                             0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
-#endif
-}
-#endif
-
-/* vec_packpx */
-
-static __inline__ vector pixel __attribute__((__always_inline__))
-vec_packpx(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector pixel)__builtin_altivec_vpkpx(__b, __a);
-#else
-  return (vector pixel)__builtin_altivec_vpkpx(__a, __b);
-#endif
-}
-
-/* vec_vpkpx */
-
-static __inline__ vector pixel __attribute__((__always_inline__))
-vec_vpkpx(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector pixel)__builtin_altivec_vpkpx(__b, __a);
-#else
-  return (vector pixel)__builtin_altivec_vpkpx(__a, __b);
-#endif
-}
-
-/* vec_packs */
-
-static __inline__ vector signed char __ATTRS_o_ai vec_packs(vector short __a,
-                                                            vector short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkshss(__b, __a);
-#else
-  return __builtin_altivec_vpkshss(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_packs(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkuhus(__b, __a);
-#else
-  return __builtin_altivec_vpkuhus(__a, __b);
-#endif
-}
-
-static __inline__ vector signed short __ATTRS_o_ai vec_packs(vector int __a,
-                                                             vector int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkswss(__b, __a);
-#else
-  return __builtin_altivec_vpkswss(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_packs(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkuwus(__b, __a);
-#else
-  return __builtin_altivec_vpkuwus(__a, __b);
-#endif
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector int __ATTRS_o_ai vec_packs(vector long long __a,
-                                                    vector long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpksdss(__b, __a);
-#else
-  return __builtin_altivec_vpksdss(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_packs(vector unsigned long long __a, vector unsigned long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkudus(__b, __a);
-#else
-  return __builtin_altivec_vpkudus(__a, __b);
-#endif
-}
-#endif
-
-/* vec_vpkshss */
-
-static __inline__ vector signed char __attribute__((__always_inline__))
-vec_vpkshss(vector short __a, vector short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkshss(__b, __a);
-#else
-  return __builtin_altivec_vpkshss(__a, __b);
-#endif
-}
-
-/* vec_vpksdss */
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector int __ATTRS_o_ai vec_vpksdss(vector long long __a,
-                                                      vector long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpksdss(__b, __a);
-#else
-  return __builtin_altivec_vpksdss(__a, __b);
-#endif
-}
-#endif
-
-/* vec_vpkuhus */
-
-static __inline__ vector unsigned char __attribute__((__always_inline__))
-vec_vpkuhus(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkuhus(__b, __a);
-#else
-  return __builtin_altivec_vpkuhus(__a, __b);
-#endif
-}
-
-/* vec_vpkudus */
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vpkudus(vector unsigned long long __a, vector unsigned long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkudus(__b, __a);
-#else
-  return __builtin_altivec_vpkudus(__a, __b);
-#endif
-}
-#endif
-
-/* vec_vpkswss */
-
-static __inline__ vector signed short __attribute__((__always_inline__))
-vec_vpkswss(vector int __a, vector int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkswss(__b, __a);
-#else
-  return __builtin_altivec_vpkswss(__a, __b);
-#endif
-}
-
-/* vec_vpkuwus */
-
-static __inline__ vector unsigned short __attribute__((__always_inline__))
-vec_vpkuwus(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkuwus(__b, __a);
-#else
-  return __builtin_altivec_vpkuwus(__a, __b);
-#endif
-}
-
-/* vec_packsu */
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_packsu(vector short __a, vector short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkshus(__b, __a);
-#else
-  return __builtin_altivec_vpkshus(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_packsu(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkuhus(__b, __a);
-#else
-  return __builtin_altivec_vpkuhus(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_packsu(vector int __a, vector int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkswus(__b, __a);
-#else
-  return __builtin_altivec_vpkswus(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_packsu(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkuwus(__b, __a);
-#else
-  return __builtin_altivec_vpkuwus(__a, __b);
-#endif
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_packsu(vector long long __a, vector long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpksdus(__b, __a);
-#else
-  return __builtin_altivec_vpksdus(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_packsu(vector unsigned long long __a, vector unsigned long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkudus(__b, __a);
-#else
-  return __builtin_altivec_vpkudus(__a, __b);
-#endif
-}
-#endif
-
-/* vec_vpkshus */
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vpkshus(vector short __a, vector short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkshus(__b, __a);
-#else
-  return __builtin_altivec_vpkshus(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vpkshus(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkuhus(__b, __a);
-#else
-  return __builtin_altivec_vpkuhus(__a, __b);
-#endif
-}
-
-/* vec_vpkswus */
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vpkswus(vector int __a, vector int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkswus(__b, __a);
-#else
-  return __builtin_altivec_vpkswus(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vpkswus(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkuwus(__b, __a);
-#else
-  return __builtin_altivec_vpkuwus(__a, __b);
-#endif
-}
-
-/* vec_vpksdus */
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vpksdus(vector long long __a, vector long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpksdus(__b, __a);
-#else
-  return __builtin_altivec_vpksdus(__a, __b);
-#endif
-}
-#endif
-
-/* vec_perm */
-
-// The vperm instruction is defined architecturally with a big-endian bias.
-// For little endian, we swap the input operands and invert the permute
-// control vector.  Only the rightmost 5 bits matter, so we could use
-// a vector of all 31s instead of all 255s to perform the inversion.
-// However, when the PCV is not a constant, using 255 has an advantage
-// in that the vec_xor can be recognized as a vec_nor (and for P8 and
-// later, possibly a vec_nand).
-
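-// Worked example (an illustrative assumption, not part of the upstream
-// header): on big endian, control byte k (0 <= k < 32) selects byte k of
-// the 32-byte concatenation {__a, __b}.  On little endian the operands
-// are swapped and the control vector is XORed with 255; only the low
-// 5 bits are used, so k becomes 31 - k, which selects the same logical
-// byte from the reversed concatenation {__b, __a}.
-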
-static __inline__ vector signed char __ATTRS_o_ai vec_perm(
-    vector signed char __a, vector signed char __b, vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector signed char)__builtin_altivec_vperm_4si((vector int)__b,
-                                                         (vector int)__a, __d);
-#else
-  return (vector signed char)__builtin_altivec_vperm_4si((vector int)__a,
-                                                         (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_perm(vector unsigned char __a, vector unsigned char __b,
-         vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector unsigned char)__builtin_altivec_vperm_4si(
-      (vector int)__b, (vector int)__a, __d);
-#else
-  return (vector unsigned char)__builtin_altivec_vperm_4si(
-      (vector int)__a, (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_perm(vector bool char __a, vector bool char __b, vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector bool char)__builtin_altivec_vperm_4si((vector int)__b,
-                                                       (vector int)__a, __d);
-#else
-  return (vector bool char)__builtin_altivec_vperm_4si((vector int)__a,
-                                                       (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_perm(vector signed short __a,
-                                                     vector signed short __b,
-                                                     vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector signed short)__builtin_altivec_vperm_4si((vector int)__b,
-                                                          (vector int)__a, __d);
-#else
-  return (vector signed short)__builtin_altivec_vperm_4si((vector int)__a,
-                                                          (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_perm(vector unsigned short __a, vector unsigned short __b,
-         vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector unsigned short)__builtin_altivec_vperm_4si(
-      (vector int)__b, (vector int)__a, __d);
-#else
-  return (vector unsigned short)__builtin_altivec_vperm_4si(
-      (vector int)__a, (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_perm(
-    vector bool short __a, vector bool short __b, vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector bool short)__builtin_altivec_vperm_4si((vector int)__b,
-                                                        (vector int)__a, __d);
-#else
-  return (vector bool short)__builtin_altivec_vperm_4si((vector int)__a,
-                                                        (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_perm(vector pixel __a,
-                                                     vector pixel __b,
-                                                     vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector pixel)__builtin_altivec_vperm_4si((vector int)__b,
-                                                   (vector int)__a, __d);
-#else
-  return (vector pixel)__builtin_altivec_vperm_4si((vector int)__a,
-                                                   (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_perm(vector signed int __a,
-                                                   vector signed int __b,
-                                                   vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector signed int)__builtin_altivec_vperm_4si(__b, __a, __d);
-#else
-  return (vector signed int)__builtin_altivec_vperm_4si(__a, __b, __c);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_perm(vector unsigned int __a, vector unsigned int __b,
-         vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector unsigned int)__builtin_altivec_vperm_4si((vector int)__b,
-                                                          (vector int)__a, __d);
-#else
-  return (vector unsigned int)__builtin_altivec_vperm_4si((vector int)__a,
-                                                          (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_perm(vector bool int __a, vector bool int __b, vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector bool int)__builtin_altivec_vperm_4si((vector int)__b,
-                                                      (vector int)__a, __d);
-#else
-  return (vector bool int)__builtin_altivec_vperm_4si((vector int)__a,
-                                                      (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_perm(vector float __a,
-                                                     vector float __b,
-                                                     vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector float)__builtin_altivec_vperm_4si((vector int)__b,
-                                                   (vector int)__a, __d);
-#else
-  return (vector float)__builtin_altivec_vperm_4si((vector int)__a,
-                                                   (vector int)__b, __c);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector long long __ATTRS_o_ai
-vec_perm(vector signed long long __a, vector signed long long __b,
-         vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector signed long long)__builtin_altivec_vperm_4si(
-      (vector int)__b, (vector int)__a, __d);
-#else
-  return (vector signed long long)__builtin_altivec_vperm_4si(
-      (vector int)__a, (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_perm(vector unsigned long long __a, vector unsigned long long __b,
-         vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector unsigned long long)__builtin_altivec_vperm_4si(
-      (vector int)__b, (vector int)__a, __d);
-#else
-  return (vector unsigned long long)__builtin_altivec_vperm_4si(
-      (vector int)__a, (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_perm(vector bool long long __a, vector bool long long __b,
-         vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector bool long long)__builtin_altivec_vperm_4si(
-      (vector int)__b, (vector int)__a, __d);
-#else
-  return (vector bool long long)__builtin_altivec_vperm_4si(
-      (vector int)__a, (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_perm(vector double __a, vector double __b, vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector double)__builtin_altivec_vperm_4si((vector int)__b,
-                                                    (vector int)__a, __d);
-#else
-  return (vector double)__builtin_altivec_vperm_4si((vector int)__a,
-                                                    (vector int)__b, __c);
-#endif
-}
-#endif
-
-/* vec_vperm */
-
-static __inline__ vector signed char __ATTRS_o_ai vec_vperm(
-    vector signed char __a, vector signed char __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vperm(vector unsigned char __a, vector unsigned char __b,
-          vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_vperm(
-    vector bool char __a, vector bool char __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vperm(vector short __a, vector short __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vperm(vector unsigned short __a, vector unsigned short __b,
-          vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_vperm(
-    vector bool short __a, vector bool short __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai
-vec_vperm(vector pixel __a, vector pixel __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vperm(vector int __a,
-                                                    vector int __b,
-                                                    vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vperm(vector unsigned int __a, vector unsigned int __b,
-          vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vperm(vector bool int __a, vector bool int __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_vperm(vector float __a, vector float __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-#ifdef __VSX__
-static __inline__ vector long long __ATTRS_o_ai vec_vperm(
-    vector long long __a, vector long long __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vperm(vector unsigned long long __a, vector unsigned long long __b,
-          vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_vperm(vector double __a, vector double __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-#endif
-
-/* vec_re */
-
-static __inline__ vector float __ATTRS_o_ai vec_re(vector float __a) {
-#ifdef __VSX__
-  return __builtin_vsx_xvresp(__a);
-#else
-  return __builtin_altivec_vrefp(__a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_re(vector double __a) {
-  return __builtin_vsx_xvredp(__a);
-}
-#endif
-
-/* vec_vrefp */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vrefp(vector float __a) {
-  return __builtin_altivec_vrefp(__a);
-}
-
-/* vec_rl */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_rl(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vrlb((vector char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_rl(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vrlb((vector char)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_rl(vector short __a,
-                                                   vector unsigned short __b) {
-  return __builtin_altivec_vrlh(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_rl(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vrlh((vector short)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_rl(vector int __a,
-                                                 vector unsigned int __b) {
-  return __builtin_altivec_vrlw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_rl(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_altivec_vrlw((vector int)__a, __b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_rl(vector signed long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vrld(__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_rl(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vrld(__a, __b);
-}
-#endif
-
-/* vec_rlmi */
-#ifdef __POWER9_VECTOR__
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_rlmi(vector unsigned int __a, vector unsigned int __b,
-         vector unsigned int __c) {
-  return __builtin_altivec_vrlwmi(__a, __c, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_rlmi(vector unsigned long long __a, vector unsigned long long __b,
-         vector unsigned long long __c) {
-  return __builtin_altivec_vrldmi(__a, __c, __b);
-}
-
-/* vec_rlnm */
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_rlnm(vector unsigned int __a, vector unsigned int __b,
-         vector unsigned int __c) {
-  vector unsigned int OneByte = { 0x8, 0x8, 0x8, 0x8 };
-  return __builtin_altivec_vrlwnm(__a, ((__c << OneByte) | __b));
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_rlnm(vector unsigned long long __a, vector unsigned long long __b,
-         vector unsigned long long __c) {
-  vector unsigned long long OneByte = { 0x8, 0x8 };
-  return __builtin_altivec_vrldnm(__a, ((__c << OneByte) | __b));
-}
-#endif
-
-/* vec_vrlb */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vrlb(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vrlb((vector char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vrlb(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vrlb((vector char)__a, __b);
-}
-
-/* vec_vrlh */
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vrlh(vector short __a, vector unsigned short __b) {
-  return __builtin_altivec_vrlh(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vrlh(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vrlh((vector short)__a, __b);
-}
-
-/* vec_vrlw */
-
-static __inline__ vector int __ATTRS_o_ai vec_vrlw(vector int __a,
-                                                   vector unsigned int __b) {
-  return __builtin_altivec_vrlw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vrlw(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_altivec_vrlw((vector int)__a, __b);
-}
-
-/* vec_round */
-
-static __inline__ vector float __ATTRS_o_ai vec_round(vector float __a) {
-#ifdef __VSX__
-  return __builtin_vsx_xvrspi(__a);
-#else
-  return __builtin_altivec_vrfin(__a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_round(vector double __a) {
-  return __builtin_vsx_xvrdpi(__a);
-}
-
-/* vec_rint */
-
-static __inline__ vector float __ATTRS_o_ai vec_rint(vector float __a) {
-  return __builtin_vsx_xvrspic(__a);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_rint(vector double __a) {
-  return __builtin_vsx_xvrdpic(__a);
-}
-
-/* vec_nearbyint */
-
-static __inline__ vector float __ATTRS_o_ai vec_nearbyint(vector float __a) {
-  return __builtin_vsx_xvrspi(__a);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_nearbyint(vector double __a) {
-  return __builtin_vsx_xvrdpi(__a);
-}
-#endif
-
-/* vec_vrfin */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vrfin(vector float __a) {
-  return __builtin_altivec_vrfin(__a);
-}
-
-/* vec_sqrt */
-
-#ifdef __VSX__
-static __inline__ vector float __ATTRS_o_ai vec_sqrt(vector float __a) {
-  return __builtin_vsx_xvsqrtsp(__a);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_sqrt(vector double __a) {
-  return __builtin_vsx_xvsqrtdp(__a);
-}
-#endif
-
-/* vec_rsqrte */
-
-static __inline__ vector float __ATTRS_o_ai vec_rsqrte(vector float __a) {
-#ifdef __VSX__
-  return __builtin_vsx_xvrsqrtesp(__a);
-#else
-  return __builtin_altivec_vrsqrtefp(__a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_rsqrte(vector double __a) {
-  return __builtin_vsx_xvrsqrtedp(__a);
-}
-#endif
-
-/* vec_vrsqrtefp */
-
-static __inline__ __vector float __attribute__((__always_inline__))
-vec_vrsqrtefp(vector float __a) {
-  return __builtin_altivec_vrsqrtefp(__a);
-}
-
-/* vec_sel */
-
-#define __builtin_altivec_vsel_4si vec_sel
-
-static __inline__ vector signed char __ATTRS_o_ai vec_sel(
-    vector signed char __a, vector signed char __b, vector unsigned char __c) {
-  return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sel(vector signed char __a, vector signed char __b, vector bool char __c) {
-  return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sel(vector unsigned char __a, vector unsigned char __b,
-        vector unsigned char __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai vec_sel(
-    vector unsigned char __a, vector unsigned char __b, vector bool char __c) {
-  return (__a & ~(vector unsigned char)__c) | (__b & (vector unsigned char)__c);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_sel(vector bool char __a, vector bool char __b, vector unsigned char __c) {
-  return (__a & ~(vector bool char)__c) | (__b & (vector bool char)__c);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_sel(vector bool char __a,
-                                                        vector bool char __b,
-                                                        vector bool char __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sel(vector short __a,
-                                                    vector short __b,
-                                                    vector unsigned short __c) {
-  return (__a & ~(vector short)__c) | (__b & (vector short)__c);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sel(vector short __a,
-                                                    vector short __b,
-                                                    vector bool short __c) {
-  return (__a & ~(vector short)__c) | (__b & (vector short)__c);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sel(vector unsigned short __a, vector unsigned short __b,
-        vector unsigned short __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sel(vector unsigned short __a, vector unsigned short __b,
-        vector bool short __c) {
-  return (__a & ~(vector unsigned short)__c) |
-         (__b & (vector unsigned short)__c);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_sel(
-    vector bool short __a, vector bool short __b, vector unsigned short __c) {
-  return (__a & ~(vector bool short)__c) | (__b & (vector bool short)__c);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_sel(vector bool short __a, vector bool short __b, vector bool short __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sel(vector int __a,
-                                                  vector int __b,
-                                                  vector unsigned int __c) {
-  return (__a & ~(vector int)__c) | (__b & (vector int)__c);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sel(vector int __a,
-                                                  vector int __b,
-                                                  vector bool int __c) {
-  return (__a & ~(vector int)__c) | (__b & (vector int)__c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_sel(
-    vector unsigned int __a, vector unsigned int __b, vector unsigned int __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sel(vector unsigned int __a, vector unsigned int __b, vector bool int __c) {
-  return (__a & ~(vector unsigned int)__c) | (__b & (vector unsigned int)__c);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_sel(vector bool int __a, vector bool int __b, vector unsigned int __c) {
-  return (__a & ~(vector bool int)__c) | (__b & (vector bool int)__c);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_sel(vector bool int __a,
-                                                       vector bool int __b,
-                                                       vector bool int __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_sel(vector float __a,
-                                                    vector float __b,
-                                                    vector unsigned int __c) {
-  vector int __res = ((vector int)__a & ~(vector int)__c) |
-                     ((vector int)__b & (vector int)__c);
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_sel(vector float __a,
-                                                    vector float __b,
-                                                    vector bool int __c) {
-  vector int __res = ((vector int)__a & ~(vector int)__c) |
-                     ((vector int)__b & (vector int)__c);
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai
-vec_sel(vector double __a, vector double __b, vector bool long long __c) {
-  vector long long __res = ((vector long long)__a & ~(vector long long)__c) |
-                           ((vector long long)__b & (vector long long)__c);
-  return (vector double)__res;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_sel(vector double __a, vector double __b, vector unsigned long long __c) {
-  vector long long __res = ((vector long long)__a & ~(vector long long)__c) |
-                           ((vector long long)__b & (vector long long)__c);
-  return (vector double)__res;
-}
-#endif
-
-/* vec_vsel */
-
-static __inline__ vector signed char __ATTRS_o_ai vec_vsel(
-    vector signed char __a, vector signed char __b, vector unsigned char __c) {
-  return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsel(vector signed char __a, vector signed char __b, vector bool char __c) {
-  return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsel(vector unsigned char __a, vector unsigned char __b,
-         vector unsigned char __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai vec_vsel(
-    vector unsigned char __a, vector unsigned char __b, vector bool char __c) {
-  return (__a & ~(vector unsigned char)__c) | (__b & (vector unsigned char)__c);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vsel(vector bool char __a, vector bool char __b, vector unsigned char __c) {
-  return (__a & ~(vector bool char)__c) | (__b & (vector bool char)__c);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_vsel(vector bool char __a,
-                                                         vector bool char __b,
-                                                         vector bool char __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vsel(vector short __a, vector short __b, vector unsigned short __c) {
-  return (__a & ~(vector short)__c) | (__b & (vector short)__c);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsel(vector short __a,
-                                                     vector short __b,
-                                                     vector bool short __c) {
-  return (__a & ~(vector short)__c) | (__b & (vector short)__c);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsel(vector unsigned short __a, vector unsigned short __b,
-         vector unsigned short __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsel(vector unsigned short __a, vector unsigned short __b,
-         vector bool short __c) {
-  return (__a & ~(vector unsigned short)__c) |
-         (__b & (vector unsigned short)__c);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_vsel(
-    vector bool short __a, vector bool short __b, vector unsigned short __c) {
-  return (__a & ~(vector bool short)__c) | (__b & (vector bool short)__c);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsel(vector bool short __a, vector bool short __b, vector bool short __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsel(vector int __a,
-                                                   vector int __b,
-                                                   vector unsigned int __c) {
-  return (__a & ~(vector int)__c) | (__b & (vector int)__c);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsel(vector int __a,
-                                                   vector int __b,
-                                                   vector bool int __c) {
-  return (__a & ~(vector int)__c) | (__b & (vector int)__c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_vsel(
-    vector unsigned int __a, vector unsigned int __b, vector unsigned int __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_vsel(
-    vector unsigned int __a, vector unsigned int __b, vector bool int __c) {
-  return (__a & ~(vector unsigned int)__c) | (__b & (vector unsigned int)__c);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vsel(vector bool int __a, vector bool int __b, vector unsigned int __c) {
-  return (__a & ~(vector bool int)__c) | (__b & (vector bool int)__c);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vsel(vector bool int __a,
-                                                        vector bool int __b,
-                                                        vector bool int __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vsel(vector float __a,
-                                                     vector float __b,
-                                                     vector unsigned int __c) {
-  vector int __res = ((vector int)__a & ~(vector int)__c) |
-                     ((vector int)__b & (vector int)__c);
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vsel(vector float __a,
-                                                     vector float __b,
-                                                     vector bool int __c) {
-  vector int __res = ((vector int)__a & ~(vector int)__c) |
-                     ((vector int)__b & (vector int)__c);
-  return (vector float)__res;
-}
-
-/* vec_sl */
-
-// vec_sl does modulo arithmetic on __b first, so __b is allowed to be more
-// than the length of __a.
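-// Illustrative example (assumption, not from the upstream header): for
-// byte elements the count is reduced modulo 8, so a per-element shift
-// count of 9 behaves like a shift by 1, matching the hardware, which
-// only uses the low-order bits of each count.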
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sl(vector unsigned char __a, vector unsigned char __b) {
-  return __a << (__b %
-                 (vector unsigned char)(sizeof(unsigned char) * __CHAR_BIT__));
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sl(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)vec_sl((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sl(vector unsigned short __a, vector unsigned short __b) {
-  return __a << (__b % (vector unsigned short)(sizeof(unsigned short) *
-                                               __CHAR_BIT__));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sl(vector short __a,
-                                                   vector unsigned short __b) {
-  return (vector short)vec_sl((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sl(vector unsigned int __a, vector unsigned int __b) {
-  return __a << (__b %
-                 (vector unsigned int)(sizeof(unsigned int) * __CHAR_BIT__));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sl(vector int __a,
-                                                 vector unsigned int __b) {
-  return (vector int)vec_sl((vector unsigned int)__a, __b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sl(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a << (__b % (vector unsigned long long)(sizeof(unsigned long long) *
-                                                   __CHAR_BIT__));
-}
-
-static __inline__ vector long long __ATTRS_o_ai
-vec_sl(vector long long __a, vector unsigned long long __b) {
-  return (vector long long)vec_sl((vector unsigned long long)__a, __b);
-}
-#endif
-
-/* vec_vslb */
-
-#define __builtin_altivec_vslb vec_vslb
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vslb(vector signed char __a, vector unsigned char __b) {
-  return vec_sl(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vslb(vector unsigned char __a, vector unsigned char __b) {
-  return vec_sl(__a, __b);
-}
-
-/* vec_vslh */
-
-#define __builtin_altivec_vslh vec_vslh
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vslh(vector short __a, vector unsigned short __b) {
-  return vec_sl(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vslh(vector unsigned short __a, vector unsigned short __b) {
-  return vec_sl(__a, __b);
-}
-
-/* vec_vslw */
-
-#define __builtin_altivec_vslw vec_vslw
-
-static __inline__ vector int __ATTRS_o_ai vec_vslw(vector int __a,
-                                                   vector unsigned int __b) {
-  return vec_sl(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vslw(vector unsigned int __a, vector unsigned int __b) {
-  return vec_sl(__a, __b);
-}
-
-/* vec_sld */
-
-#define __builtin_altivec_vsldoi_4si vec_sld
-
-static __inline__ vector signed char __ATTRS_o_ai vec_sld(
-    vector signed char __a, vector signed char __b, unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sld(vector unsigned char __a, vector unsigned char __b,
-        unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_sld(vector bool char __a, vector bool char __b, unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector signed short __ATTRS_o_ai vec_sld(
-    vector signed short __a, vector signed short __b, unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sld(vector unsigned short __a, vector unsigned short __b,
-        unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_sld(vector bool short __a, vector bool short __b, unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_sld(vector pixel __a,
-                                                    vector pixel __b,
-                                                    unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_sld(vector signed int __a, vector signed int __b, unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_sld(
-    vector unsigned int __a, vector unsigned int __b, unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_sld(vector bool int __a,
-                                                       vector bool int __b,
-                                                       unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_sld(vector float __a,
-                                                    vector float __b,
-                                                    unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_sld(vector bool long long __a, vector bool long long __b,
-        unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_sld(vector signed long long __a, vector signed long long __b,
-        unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sld(vector unsigned long long __a, vector unsigned long long __b,
-        unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_sld(vector double __a,
-                                                     vector double __b,
-                                                     unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-#endif
-
-/* vec_sldw */
-static __inline__ vector signed char __ATTRS_o_ai vec_sldw(
-    vector signed char __a, vector signed char __b, unsigned const int __c) {
-  return vec_sld(__a, __b, ((__c << 2) & 0x0F));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sldw(vector unsigned char __a, vector unsigned char __b,
-         unsigned const int __c) {
-  return vec_sld(__a, __b, ((__c << 2) & 0x0F));
-}
-
-static __inline__ vector signed short __ATTRS_o_ai vec_sldw(
-    vector signed short __a, vector signed short __b, unsigned const int __c) {
-  return vec_sld(__a, __b, ((__c << 2) & 0x0F));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sldw(vector unsigned short __a, vector unsigned short __b,
-         unsigned const int __c) {
-  return vec_sld(__a, __b, ((__c << 2) & 0x0F));
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_sldw(vector signed int __a, vector signed int __b, unsigned const int __c) {
-  return vec_sld(__a, __b, ((__c << 2) & 0x0F));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_sldw(
-    vector unsigned int __a, vector unsigned int __b, unsigned const int __c) {
-  return vec_sld(__a, __b, ((__c << 2) & 0x0F));
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_sldw(vector signed long long __a, vector signed long long __b,
-         unsigned const int __c) {
-  return vec_sld(__a, __b, ((__c << 2) & 0x0F));
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sldw(vector unsigned long long __a, vector unsigned long long __b,
-         unsigned const int __c) {
-  return vec_sld(__a, __b, ((__c << 2) & 0x0F));
-}
-#endif
-
-#ifdef __POWER9_VECTOR__
-/* vec_slv */
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_slv(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vslv(__a, __b);
-}
-
-/* vec_srv */
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_srv(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vsrv(__a, __b);
-}
-#endif
-
-/* vec_vsldoi */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsldoi(vector signed char __a, vector signed char __b, unsigned char __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai vec_vsldoi(
-    vector unsigned char __a, vector unsigned char __b, unsigned char __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsldoi(vector short __a,
-                                                       vector short __b,
-                                                       unsigned char __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai vec_vsldoi(
-    vector unsigned short __a, vector unsigned short __b, unsigned char __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsldoi(vector pixel __a,
-                                                       vector pixel __b,
-                                                       unsigned char __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsldoi(vector int __a,
-                                                     vector int __b,
-                                                     unsigned char __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_vsldoi(
-    vector unsigned int __a, vector unsigned int __b, unsigned char __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vsldoi(vector float __a,
-                                                       vector float __b,
-                                                       unsigned char __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-/* vec_sll */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sll(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vsl((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sll(vector signed char __a, vector unsigned short __b) {
-  return (vector signed char)__builtin_altivec_vsl((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sll(vector signed char __a, vector unsigned int __b) {
-  return (vector signed char)__builtin_altivec_vsl((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sll(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sll(vector unsigned char __a, vector unsigned short __b) {
-  return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sll(vector unsigned char __a, vector unsigned int __b) {
-  return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_sll(vector bool char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vsl((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_sll(vector bool char __a, vector unsigned short __b) {
-  return (vector bool char)__builtin_altivec_vsl((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_sll(vector bool char __a, vector unsigned int __b) {
-  return (vector bool char)__builtin_altivec_vsl((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sll(vector short __a,
-                                                    vector unsigned char __b) {
-  return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sll(vector short __a,
-                                                    vector unsigned short __b) {
-  return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sll(vector short __a,
-                                                    vector unsigned int __b) {
-  return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sll(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sll(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sll(vector unsigned short __a, vector unsigned int __b) {
-  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_sll(vector bool short __a, vector unsigned char __b) {
-  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_sll(vector bool short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_sll(vector bool short __a, vector unsigned int __b) {
-  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_sll(vector pixel __a,
-                                                    vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_sll(vector pixel __a,
-                                                    vector unsigned short __b) {
-  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_sll(vector pixel __a,
-                                                    vector unsigned int __b) {
-  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sll(vector int __a,
-                                                  vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sll(vector int __a,
-                                                  vector unsigned short __b) {
-  return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sll(vector int __a,
-                                                  vector unsigned int __b) {
-  return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sll(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sll(vector unsigned int __a, vector unsigned short __b) {
-  return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sll(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_sll(vector bool int __a, vector unsigned char __b) {
-  return (vector bool int)__builtin_altivec_vsl((vector int)__a,
-                                                (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_sll(vector bool int __a, vector unsigned short __b) {
-  return (vector bool int)__builtin_altivec_vsl((vector int)__a,
-                                                (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_sll(vector bool int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vsl((vector int)__a,
-                                                (vector int)__b);
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_sll(vector signed long long __a, vector unsigned char __b) {
-  return (vector signed long long)__builtin_altivec_vsl((vector int)__a,
-                                                        (vector int)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sll(vector unsigned long long __a, vector unsigned char __b) {
-  return (vector unsigned long long)__builtin_altivec_vsl((vector int)__a,
-                                                          (vector int)__b);
-}
-#endif
-
-/* vec_vsl */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsl(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vsl((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsl(vector signed char __a, vector unsigned short __b) {
-  return (vector signed char)__builtin_altivec_vsl((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsl(vector signed char __a, vector unsigned int __b) {
-  return (vector signed char)__builtin_altivec_vsl((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsl(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsl(vector unsigned char __a, vector unsigned short __b) {
-  return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsl(vector unsigned char __a, vector unsigned int __b) {
-  return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vsl(vector bool char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vsl((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vsl(vector bool char __a, vector unsigned short __b) {
-  return (vector bool char)__builtin_altivec_vsl((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vsl(vector bool char __a, vector unsigned int __b) {
-  return (vector bool char)__builtin_altivec_vsl((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsl(vector short __a,
-                                                    vector unsigned char __b) {
-  return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsl(vector short __a,
-                                                    vector unsigned short __b) {
-  return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsl(vector short __a,
-                                                    vector unsigned int __b) {
-  return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsl(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsl(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsl(vector unsigned short __a, vector unsigned int __b) {
-  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsl(vector bool short __a, vector unsigned char __b) {
-  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsl(vector bool short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsl(vector bool short __a, vector unsigned int __b) {
-  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsl(vector pixel __a,
-                                                    vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsl(vector pixel __a,
-                                                    vector unsigned short __b) {
-  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsl(vector pixel __a,
-                                                    vector unsigned int __b) {
-  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsl(vector int __a,
-                                                  vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsl(vector int __a,
-                                                  vector unsigned short __b) {
-  return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsl(vector int __a,
-                                                  vector unsigned int __b) {
-  return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsl(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsl(vector unsigned int __a, vector unsigned short __b) {
-  return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsl(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vsl(vector bool int __a, vector unsigned char __b) {
-  return (vector bool int)__builtin_altivec_vsl((vector int)__a,
-                                                (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vsl(vector bool int __a, vector unsigned short __b) {
-  return (vector bool int)__builtin_altivec_vsl((vector int)__a,
-                                                (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vsl(vector bool int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vsl((vector int)__a,
-                                                (vector int)__b);
-}
-
-/* vec_slo */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_slo(vector signed char __a, vector signed char __b) {
-  return (vector signed char)__builtin_altivec_vslo((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_slo(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vslo((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_slo(vector unsigned char __a, vector signed char __b) {
-  return (vector unsigned char)__builtin_altivec_vslo((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_slo(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vslo((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_slo(vector short __a,
-                                                    vector signed char __b) {
-  return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_slo(vector short __a,
-                                                    vector unsigned char __b) {
-  return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_slo(vector unsigned short __a, vector signed char __b) {
-  return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_slo(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_slo(vector pixel __a,
-                                                    vector signed char __b) {
-  return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_slo(vector pixel __a,
-                                                    vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_slo(vector int __a,
-                                                  vector signed char __b) {
-  return (vector int)__builtin_altivec_vslo(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_slo(vector int __a,
-                                                  vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vslo(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_slo(vector unsigned int __a, vector signed char __b) {
-  return (vector unsigned int)__builtin_altivec_vslo((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_slo(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_altivec_vslo((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_slo(vector float __a,
-                                                    vector signed char __b) {
-  return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_slo(vector float __a,
-                                                    vector unsigned char __b) {
-  return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_slo(vector signed long long __a, vector signed char __b) {
-  return (vector signed long long)__builtin_altivec_vslo((vector int)__a,
-                                                         (vector int)__b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_slo(vector signed long long __a, vector unsigned char __b) {
-  return (vector signed long long)__builtin_altivec_vslo((vector int)__a,
-                                                         (vector int)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_slo(vector unsigned long long __a, vector signed char __b) {
-  return (vector unsigned long long)__builtin_altivec_vslo((vector int)__a,
-                                                           (vector int)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_slo(vector unsigned long long __a, vector unsigned char __b) {
-  return (vector unsigned long long)__builtin_altivec_vslo((vector int)__a,
-                                                           (vector int)__b);
-}
-#endif
-
-/* vec_vslo */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vslo(vector signed char __a, vector signed char __b) {
-  return (vector signed char)__builtin_altivec_vslo((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vslo(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vslo((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vslo(vector unsigned char __a, vector signed char __b) {
-  return (vector unsigned char)__builtin_altivec_vslo((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vslo(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vslo((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vslo(vector short __a,
-                                                     vector signed char __b) {
-  return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vslo(vector short __a,
-                                                     vector unsigned char __b) {
-  return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vslo(vector unsigned short __a, vector signed char __b) {
-  return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vslo(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vslo(vector pixel __a,
-                                                     vector signed char __b) {
-  return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vslo(vector pixel __a,
-                                                     vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vslo(vector int __a,
-                                                   vector signed char __b) {
-  return (vector int)__builtin_altivec_vslo(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vslo(vector int __a,
-                                                   vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vslo(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vslo(vector unsigned int __a, vector signed char __b) {
-  return (vector unsigned int)__builtin_altivec_vslo((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vslo(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_altivec_vslo((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vslo(vector float __a,
-                                                     vector signed char __b) {
-  return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vslo(vector float __a,
-                                                     vector unsigned char __b) {
-  return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-/* vec_splat */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_splat(vector signed char __a, unsigned const int __b) {
-  return vec_perm(__a, __a, (vector unsigned char)(__b & 0x0F));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_splat(vector unsigned char __a, unsigned const int __b) {
-  return vec_perm(__a, __a, (vector unsigned char)(__b & 0x0F));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_splat(vector bool char __a, unsigned const int __b) {
-  return vec_perm(__a, __a, (vector unsigned char)(__b & 0x0F));
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_splat(vector signed short __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x07) * 2;
-  unsigned char b1 = b0 + 1;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1, b0, b1,
-                                         b0, b1, b0, b1, b0, b1));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_splat(vector unsigned short __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x07) * 2;
-  unsigned char b1 = b0 + 1;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1, b0, b1,
-                                         b0, b1, b0, b1, b0, b1));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_splat(vector bool short __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x07) * 2;
-  unsigned char b1 = b0 + 1;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1, b0, b1,
-                                         b0, b1, b0, b1, b0, b1));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_splat(vector pixel __a,
-                                                      unsigned const int __b) {
-  unsigned char b0 = (__b & 0x07) * 2;
-  unsigned char b1 = b0 + 1;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1, b0, b1,
-                                         b0, b1, b0, b1, b0, b1));
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_splat(vector signed int __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x03) * 4;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0, b1,
-                                         b2, b3, b0, b1, b2, b3));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_splat(vector unsigned int __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x03) * 4;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0, b1,
-                                         b2, b3, b0, b1, b2, b3));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_splat(vector bool int __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x03) * 4;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0, b1,
-                                         b2, b3, b0, b1, b2, b3));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_splat(vector float __a,
-                                                      unsigned const int __b) {
-  unsigned char b0 = (__b & 0x03) * 4;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0, b1,
-                                         b2, b3, b0, b1, b2, b3));
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_splat(vector double __a,
-                                                       unsigned const int __b) {
-  unsigned char b0 = (__b & 0x01) * 8;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4, b5 = b0 + 5,
-                b6 = b0 + 6, b7 = b0 + 7;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7, b0, b1,
-                                         b2, b3, b4, b5, b6, b7));
-}
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_splat(vector bool long long __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x01) * 8;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4, b5 = b0 + 5,
-                b6 = b0 + 6, b7 = b0 + 7;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7, b0, b1,
-                                         b2, b3, b4, b5, b6, b7));
-}
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_splat(vector signed long long __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x01) * 8;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4, b5 = b0 + 5,
-                b6 = b0 + 6, b7 = b0 + 7;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7, b0, b1,
-                                         b2, b3, b4, b5, b6, b7));
-}
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_splat(vector unsigned long long __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x01) * 8;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4, b5 = b0 + 5,
-                b6 = b0 + 6, b7 = b0 + 7;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7, b0, b1,
-                                         b2, b3, b4, b5, b6, b7));
-}
-#endif
-
-/* vec_vspltb */
-
-#define __builtin_altivec_vspltb vec_vspltb
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vspltb(vector signed char __a, unsigned char __b) {
-  return vec_perm(__a, __a, (vector unsigned char)(__b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vspltb(vector unsigned char __a, unsigned char __b) {
-  return vec_perm(__a, __a, (vector unsigned char)(__b));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_vspltb(vector bool char __a,
-                                                           unsigned char __b) {
-  return vec_perm(__a, __a, (vector unsigned char)(__b));
-}
-
-/* vec_vsplth */
-
-#define __builtin_altivec_vsplth vec_vsplth
-
-static __inline__ vector short __ATTRS_o_ai vec_vsplth(vector short __a,
-                                                       unsigned char __b) {
-  __b *= 2;
-  unsigned char b1 = __b + 1;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1,
-                                         __b, b1, __b, b1, __b, b1, __b, b1));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsplth(vector unsigned short __a, unsigned char __b) {
-  __b *= 2;
-  unsigned char b1 = __b + 1;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1,
-                                         __b, b1, __b, b1, __b, b1, __b, b1));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsplth(vector bool short __a, unsigned char __b) {
-  __b *= 2;
-  unsigned char b1 = __b + 1;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1,
-                                         __b, b1, __b, b1, __b, b1, __b, b1));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsplth(vector pixel __a,
-                                                       unsigned char __b) {
-  __b *= 2;
-  unsigned char b1 = __b + 1;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1,
-                                         __b, b1, __b, b1, __b, b1, __b, b1));
-}
-
-/* vec_vspltw */
-
-#define __builtin_altivec_vspltw vec_vspltw
-
-static __inline__ vector int __ATTRS_o_ai vec_vspltw(vector int __a,
-                                                     unsigned char __b) {
-  __b *= 4;
-  unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b,
-                                         b1, b2, b3, __b, b1, b2, b3));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vspltw(vector unsigned int __a, unsigned char __b) {
-  __b *= 4;
-  unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b,
-                                         b1, b2, b3, __b, b1, b2, b3));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vspltw(vector bool int __a,
-                                                          unsigned char __b) {
-  __b *= 4;
-  unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b,
-                                         b1, b2, b3, __b, b1, b2, b3));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vspltw(vector float __a,
-                                                       unsigned char __b) {
-  __b *= 4;
-  unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b,
-                                         b1, b2, b3, __b, b1, b2, b3));
-}
-
-/* vec_splat_s8 */
-
-#define __builtin_altivec_vspltisb vec_splat_s8
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector signed char __ATTRS_o_ai
-vec_splat_s8(signed char __a) {
-  return (vector signed char)(__a);
-}
-
-/* vec_vspltisb */
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vspltisb(signed char __a) {
-  return (vector signed char)(__a);
-}
-
-/* vec_splat_s16 */
-
-#define __builtin_altivec_vspltish vec_splat_s16
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector short __ATTRS_o_ai vec_splat_s16(signed char __a) {
-  return (vector short)(__a);
-}
-
-/* vec_vspltish */
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector short __ATTRS_o_ai vec_vspltish(signed char __a) {
-  return (vector short)(__a);
-}
-
-/* vec_splat_s32 */
-
-#define __builtin_altivec_vspltisw vec_splat_s32
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector int __ATTRS_o_ai vec_splat_s32(signed char __a) {
-  return (vector int)(__a);
-}
-
-/* vec_vspltisw */
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector int __ATTRS_o_ai vec_vspltisw(signed char __a) {
-  return (vector int)(__a);
-}
-
-/* vec_splat_u8 */
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_splat_u8(unsigned char __a) {
-  return (vector unsigned char)(__a);
-}
-
-/* vec_splat_u16 */
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_splat_u16(signed char __a) {
-  return (vector unsigned short)(__a);
-}
-
-/* vec_splat_u32 */
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_splat_u32(signed char __a) {
-  return (vector unsigned int)(__a);
-}
-
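The repeated FIXME above reflects that the underlying vspltis* instructions take a 5-bit signed immediate, so the argument is meant to be a literal in the range -16..15. A hedged usage sketch (not part of the original header; assumes an AltiVec-enabled target):

#include <altivec.h>

static void demo_splat_literals(void) {
  /* Each call materializes a vector with every element set to the literal. */
  vector signed char all_ones = vec_splat_s8(-1); /* sixteen 0xFF bytes    */
  vector short fours          = vec_splat_s16(4); /* eight halfwords of 4  */
  vector unsigned int sevens  = vec_splat_u32(7); /* four words of 7       */
  (void)all_ones; (void)fours; (void)sevens;
}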
-/* vec_sr */
-
-// vec_sr reduces each shift count in __b modulo the element width first, so
-// the counts in __b may equal or exceed the number of bits in each element of
-// __a.
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sr(vector unsigned char __a, vector unsigned char __b) {
-  return __a >>
-         (__b % (vector unsigned char)(sizeof(unsigned char) * __CHAR_BIT__));
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sr(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)vec_sr((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sr(vector unsigned short __a, vector unsigned short __b) {
-  return __a >>
-         (__b % (vector unsigned short)(sizeof(unsigned short) * __CHAR_BIT__));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sr(vector short __a,
-                                                   vector unsigned short __b) {
-  return (vector short)vec_sr((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sr(vector unsigned int __a, vector unsigned int __b) {
-  return __a >>
-         (__b % (vector unsigned int)(sizeof(unsigned int) * __CHAR_BIT__));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sr(vector int __a,
-                                                 vector unsigned int __b) {
-  return (vector int)vec_sr((vector unsigned int)__a, __b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sr(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a >> (__b % (vector unsigned long long)(sizeof(unsigned long long) *
-                                                   __CHAR_BIT__));
-}
-
-static __inline__ vector long long __ATTRS_o_ai
-vec_sr(vector long long __a, vector unsigned long long __b) {
-  return (vector long long)vec_sr((vector unsigned long long)__a, __b);
-}
-#endif
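The modulo behaviour noted above means out-of-range shift counts are safe with vec_sr. A minimal usage sketch (hypothetical values; assumes an AltiVec target and this header, not part of the header itself):

#include <altivec.h>

static vector unsigned int demo_vec_sr(void) {
  vector unsigned int v = (vector unsigned int)(0x80u, 0x80u, 0x80u, 0x80u);
  /* Counts 33 and 65 reduce modulo 32 (the element width) to 1; 36 reduces to 4. */
  vector unsigned int s = (vector unsigned int)(33u, 65u, 4u, 36u);
  return vec_sr(v, s); /* yields (0x40, 0x40, 0x08, 0x08) */
}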
-
-/* vec_vsrb */
-
-#define __builtin_altivec_vsrb vec_vsrb
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsrb(vector signed char __a, vector unsigned char __b) {
-  return vec_sr(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsrb(vector unsigned char __a, vector unsigned char __b) {
-  return vec_sr(__a, __b);
-}
-
-/* vec_vsrh */
-
-#define __builtin_altivec_vsrh vec_vsrh
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vsrh(vector short __a, vector unsigned short __b) {
-  return vec_sr(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsrh(vector unsigned short __a, vector unsigned short __b) {
-  return vec_sr(__a, __b);
-}
-
-/* vec_vsrw */
-
-#define __builtin_altivec_vsrw vec_vsrw
-
-static __inline__ vector int __ATTRS_o_ai vec_vsrw(vector int __a,
-                                                   vector unsigned int __b) {
-  return vec_sr(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsrw(vector unsigned int __a, vector unsigned int __b) {
-  return vec_sr(__a, __b);
-}
-
-/* vec_sra */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sra(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vsrab((vector char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sra(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vsrab((vector char)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sra(vector short __a,
-                                                    vector unsigned short __b) {
-  return __builtin_altivec_vsrah(__a, (vector unsigned short)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sra(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vsrah((vector short)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sra(vector int __a,
-                                                  vector unsigned int __b) {
-  return __builtin_altivec_vsraw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sra(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_altivec_vsraw((vector int)__a, __b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_sra(vector signed long long __a, vector unsigned long long __b) {
-  return __a >> __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sra(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)((vector signed long long)__a >> __b);
-}
-#endif
-
-/* vec_vsrab */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsrab(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vsrab((vector char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsrab(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vsrab((vector char)__a, __b);
-}
-
-/* vec_vsrah */
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vsrah(vector short __a, vector unsigned short __b) {
-  return __builtin_altivec_vsrah(__a, (vector unsigned short)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsrah(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vsrah((vector short)__a, __b);
-}
-
-/* vec_vsraw */
-
-static __inline__ vector int __ATTRS_o_ai vec_vsraw(vector int __a,
-                                                    vector unsigned int __b) {
-  return __builtin_altivec_vsraw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsraw(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_altivec_vsraw((vector int)__a, __b);
-}
-
-/* vec_srl */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_srl(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vsr((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_srl(vector signed char __a, vector unsigned short __b) {
-  return (vector signed char)__builtin_altivec_vsr((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_srl(vector signed char __a, vector unsigned int __b) {
-  return (vector signed char)__builtin_altivec_vsr((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_srl(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_srl(vector unsigned char __a, vector unsigned short __b) {
-  return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_srl(vector unsigned char __a, vector unsigned int __b) {
-  return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_srl(vector bool char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vsr((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_srl(vector bool char __a, vector unsigned short __b) {
-  return (vector bool char)__builtin_altivec_vsr((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_srl(vector bool char __a, vector unsigned int __b) {
-  return (vector bool char)__builtin_altivec_vsr((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_srl(vector short __a,
-                                                    vector unsigned char __b) {
-  return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_srl(vector short __a,
-                                                    vector unsigned short __b) {
-  return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_srl(vector short __a,
-                                                    vector unsigned int __b) {
-  return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_srl(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_srl(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_srl(vector unsigned short __a, vector unsigned int __b) {
-  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_srl(vector bool short __a, vector unsigned char __b) {
-  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_srl(vector bool short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_srl(vector bool short __a, vector unsigned int __b) {
-  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_srl(vector pixel __a,
-                                                    vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_srl(vector pixel __a,
-                                                    vector unsigned short __b) {
-  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_srl(vector pixel __a,
-                                                    vector unsigned int __b) {
-  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_srl(vector int __a,
-                                                  vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_srl(vector int __a,
-                                                  vector unsigned short __b) {
-  return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_srl(vector int __a,
-                                                  vector unsigned int __b) {
-  return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_srl(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_srl(vector unsigned int __a, vector unsigned short __b) {
-  return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_srl(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_srl(vector bool int __a, vector unsigned char __b) {
-  return (vector bool int)__builtin_altivec_vsr((vector int)__a,
-                                                (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_srl(vector bool int __a, vector unsigned short __b) {
-  return (vector bool int)__builtin_altivec_vsr((vector int)__a,
-                                                (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_srl(vector bool int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vsr((vector int)__a,
-                                                (vector int)__b);
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_srl(vector signed long long __a, vector unsigned char __b) {
-  return (vector signed long long)__builtin_altivec_vsr((vector int)__a,
-                                                        (vector int)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_srl(vector unsigned long long __a, vector unsigned char __b) {
-  return (vector unsigned long long)__builtin_altivec_vsr((vector int)__a,
-                                                          (vector int)__b);
-}
-#endif
-
-/* vec_vsr */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsr(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vsr((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsr(vector signed char __a, vector unsigned short __b) {
-  return (vector signed char)__builtin_altivec_vsr((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsr(vector signed char __a, vector unsigned int __b) {
-  return (vector signed char)__builtin_altivec_vsr((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsr(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsr(vector unsigned char __a, vector unsigned short __b) {
-  return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsr(vector unsigned char __a, vector unsigned int __b) {
-  return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vsr(vector bool char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vsr((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vsr(vector bool char __a, vector unsigned short __b) {
-  return (vector bool char)__builtin_altivec_vsr((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vsr(vector bool char __a, vector unsigned int __b) {
-  return (vector bool char)__builtin_altivec_vsr((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsr(vector short __a,
-                                                    vector unsigned char __b) {
-  return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsr(vector short __a,
-                                                    vector unsigned short __b) {
-  return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsr(vector short __a,
-                                                    vector unsigned int __b) {
-  return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsr(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsr(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsr(vector unsigned short __a, vector unsigned int __b) {
-  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsr(vector bool short __a, vector unsigned char __b) {
-  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsr(vector bool short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsr(vector bool short __a, vector unsigned int __b) {
-  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsr(vector pixel __a,
-                                                    vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsr(vector pixel __a,
-                                                    vector unsigned short __b) {
-  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsr(vector pixel __a,
-                                                    vector unsigned int __b) {
-  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsr(vector int __a,
-                                                  vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsr(vector int __a,
-                                                  vector unsigned short __b) {
-  return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsr(vector int __a,
-                                                  vector unsigned int __b) {
-  return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsr(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsr(vector unsigned int __a, vector unsigned short __b) {
-  return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsr(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vsr(vector bool int __a, vector unsigned char __b) {
-  return (vector bool int)__builtin_altivec_vsr((vector int)__a,
-                                                (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vsr(vector bool int __a, vector unsigned short __b) {
-  return (vector bool int)__builtin_altivec_vsr((vector int)__a,
-                                                (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vsr(vector bool int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vsr((vector int)__a,
-                                                (vector int)__b);
-}
-
-/* vec_sro */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sro(vector signed char __a, vector signed char __b) {
-  return (vector signed char)__builtin_altivec_vsro((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sro(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vsro((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sro(vector unsigned char __a, vector signed char __b) {
-  return (vector unsigned char)__builtin_altivec_vsro((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sro(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vsro((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sro(vector short __a,
-                                                    vector signed char __b) {
-  return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sro(vector short __a,
-                                                    vector unsigned char __b) {
-  return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sro(vector unsigned short __a, vector signed char __b) {
-  return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sro(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_sro(vector pixel __a,
-                                                    vector signed char __b) {
-  return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_sro(vector pixel __a,
-                                                    vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sro(vector int __a,
-                                                  vector signed char __b) {
-  return (vector int)__builtin_altivec_vsro(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sro(vector int __a,
-                                                  vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vsro(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sro(vector unsigned int __a, vector signed char __b) {
-  return (vector unsigned int)__builtin_altivec_vsro((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sro(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_altivec_vsro((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_sro(vector float __a,
-                                                    vector signed char __b) {
-  return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_sro(vector float __a,
-                                                    vector unsigned char __b) {
-  return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_sro(vector signed long long __a, vector signed char __b) {
-  return (vector signed long long)__builtin_altivec_vsro((vector int)__a,
-                                                         (vector int)__b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_sro(vector signed long long __a, vector unsigned char __b) {
-  return (vector signed long long)__builtin_altivec_vsro((vector int)__a,
-                                                         (vector int)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sro(vector unsigned long long __a, vector signed char __b) {
-  return (vector unsigned long long)__builtin_altivec_vsro((vector int)__a,
-                                                           (vector int)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sro(vector unsigned long long __a, vector unsigned char __b) {
-  return (vector unsigned long long)__builtin_altivec_vsro((vector int)__a,
-                                                           (vector int)__b);
-}
-#endif
-
-/* vec_vsro */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsro(vector signed char __a, vector signed char __b) {
-  return (vector signed char)__builtin_altivec_vsro((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsro(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vsro((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsro(vector unsigned char __a, vector signed char __b) {
-  return (vector unsigned char)__builtin_altivec_vsro((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsro(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vsro((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsro(vector short __a,
-                                                     vector signed char __b) {
-  return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsro(vector short __a,
-                                                     vector unsigned char __b) {
-  return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsro(vector unsigned short __a, vector signed char __b) {
-  return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsro(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsro(vector pixel __a,
-                                                     vector signed char __b) {
-  return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsro(vector pixel __a,
-                                                     vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsro(vector int __a,
-                                                   vector signed char __b) {
-  return (vector int)__builtin_altivec_vsro(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsro(vector int __a,
-                                                   vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vsro(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsro(vector unsigned int __a, vector signed char __b) {
-  return (vector unsigned int)__builtin_altivec_vsro((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsro(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_altivec_vsro((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vsro(vector float __a,
-                                                     vector signed char __b) {
-  return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vsro(vector float __a,
-                                                     vector unsigned char __b) {
-  return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-/* vec_st */
-
-static __inline__ void __ATTRS_o_ai vec_st(vector signed char __a, int __b,
-                                           vector signed char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector signed char __a, int __b,
-                                           signed char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector unsigned char __a, int __b,
-                                           vector unsigned char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector unsigned char __a, int __b,
-                                           unsigned char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool char __a, int __b,
-                                           signed char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool char __a, int __b,
-                                           unsigned char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool char __a, int __b,
-                                           vector bool char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector short __a, int __b,
-                                           vector short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector short __a, int __b,
-                                           short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector unsigned short __a, int __b,
-                                           vector unsigned short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector unsigned short __a, int __b,
-                                           unsigned short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool short __a, int __b,
-                                           short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool short __a, int __b,
-                                           unsigned short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool short __a, int __b,
-                                           vector bool short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector pixel __a, int __b,
-                                           short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector pixel __a, int __b,
-                                           unsigned short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector pixel __a, int __b,
-                                           vector pixel *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector int __a, int __b,
-                                           vector int *__c) {
-  __builtin_altivec_stvx(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector int __a, int __b, int *__c) {
-  __builtin_altivec_stvx(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector unsigned int __a, int __b,
-                                           vector unsigned int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector unsigned int __a, int __b,
-                                           unsigned int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool int __a, int __b,
-                                           int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool int __a, int __b,
-                                           unsigned int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool int __a, int __b,
-                                           vector bool int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector float __a, int __b,
-                                           vector float *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector float __a, int __b,
-                                           float *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-/* vec_stvx */
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector signed char __a, int __b,
-                                             vector signed char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector signed char __a, int __b,
-                                             signed char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned char __a, int __b,
-                                             vector unsigned char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned char __a, int __b,
-                                             unsigned char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool char __a, int __b,
-                                             signed char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool char __a, int __b,
-                                             unsigned char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool char __a, int __b,
-                                             vector bool char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector short __a, int __b,
-                                             vector short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector short __a, int __b,
-                                             short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned short __a, int __b,
-                                             vector unsigned short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned short __a, int __b,
-                                             unsigned short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool short __a, int __b,
-                                             short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool short __a, int __b,
-                                             unsigned short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool short __a, int __b,
-                                             vector bool short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector pixel __a, int __b,
-                                             short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector pixel __a, int __b,
-                                             unsigned short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector pixel __a, int __b,
-                                             vector pixel *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector int __a, int __b,
-                                             vector int *__c) {
-  __builtin_altivec_stvx(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector int __a, int __b,
-                                             int *__c) {
-  __builtin_altivec_stvx(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned int __a, int __b,
-                                             vector unsigned int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned int __a, int __b,
-                                             unsigned int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool int __a, int __b,
-                                             int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool int __a, int __b,
-                                             unsigned int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool int __a, int __b,
-                                             vector bool int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector float __a, int __b,
-                                             vector float *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector float __a, int __b,
-                                             float *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-/* vec_ste */
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector signed char __a, int __b,
-                                            signed char *__c) {
-  __builtin_altivec_stvebx((vector char)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector unsigned char __a, int __b,
-                                            unsigned char *__c) {
-  __builtin_altivec_stvebx((vector char)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector bool char __a, int __b,
-                                            signed char *__c) {
-  __builtin_altivec_stvebx((vector char)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector bool char __a, int __b,
-                                            unsigned char *__c) {
-  __builtin_altivec_stvebx((vector char)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector short __a, int __b,
-                                            short *__c) {
-  __builtin_altivec_stvehx(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector unsigned short __a, int __b,
-                                            unsigned short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector bool short __a, int __b,
-                                            short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector bool short __a, int __b,
-                                            unsigned short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector pixel __a, int __b,
-                                            short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector pixel __a, int __b,
-                                            unsigned short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector int __a, int __b, int *__c) {
-  __builtin_altivec_stvewx(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector unsigned int __a, int __b,
-                                            unsigned int *__c) {
-  __builtin_altivec_stvewx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector bool int __a, int __b,
-                                            int *__c) {
-  __builtin_altivec_stvewx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector bool int __a, int __b,
-                                            unsigned int *__c) {
-  __builtin_altivec_stvewx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector float __a, int __b,
-                                            float *__c) {
-  __builtin_altivec_stvewx((vector int)__a, __b, __c);
-}
-
-/* vec_stvebx */
-
-static __inline__ void __ATTRS_o_ai vec_stvebx(vector signed char __a, int __b,
-                                               signed char *__c) {
-  __builtin_altivec_stvebx((vector char)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvebx(vector unsigned char __a,
-                                               int __b, unsigned char *__c) {
-  __builtin_altivec_stvebx((vector char)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvebx(vector bool char __a, int __b,
-                                               signed char *__c) {
-  __builtin_altivec_stvebx((vector char)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvebx(vector bool char __a, int __b,
-                                               unsigned char *__c) {
-  __builtin_altivec_stvebx((vector char)__a, __b, __c);
-}
-
-/* vec_stvehx */
-
-static __inline__ void __ATTRS_o_ai vec_stvehx(vector short __a, int __b,
-                                               short *__c) {
-  __builtin_altivec_stvehx(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvehx(vector unsigned short __a,
-                                               int __b, unsigned short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvehx(vector bool short __a, int __b,
-                                               short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvehx(vector bool short __a, int __b,
-                                               unsigned short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvehx(vector pixel __a, int __b,
-                                               short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvehx(vector pixel __a, int __b,
-                                               unsigned short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-/* vec_stvewx */
-
-static __inline__ void __ATTRS_o_ai vec_stvewx(vector int __a, int __b,
-                                               int *__c) {
-  __builtin_altivec_stvewx(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvewx(vector unsigned int __a, int __b,
-                                               unsigned int *__c) {
-  __builtin_altivec_stvewx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvewx(vector bool int __a, int __b,
-                                               int *__c) {
-  __builtin_altivec_stvewx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvewx(vector bool int __a, int __b,
-                                               unsigned int *__c) {
-  __builtin_altivec_stvewx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvewx(vector float __a, int __b,
-                                               float *__c) {
-  __builtin_altivec_stvewx((vector int)__a, __b, __c);
-}
-
-/* vec_stl */
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector signed char __a, int __b,
-                                            vector signed char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector signed char __a, int __b,
-                                            signed char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned char __a, int __b,
-                                            vector unsigned char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned char __a, int __b,
-                                            unsigned char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool char __a, int __b,
-                                            signed char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool char __a, int __b,
-                                            unsigned char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool char __a, int __b,
-                                            vector bool char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector short __a, int __b,
-                                            vector short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector short __a, int __b,
-                                            short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned short __a, int __b,
-                                            vector unsigned short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned short __a, int __b,
-                                            unsigned short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool short __a, int __b,
-                                            short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool short __a, int __b,
-                                            unsigned short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool short __a, int __b,
-                                            vector bool short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector pixel __a, int __b,
-                                            short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector pixel __a, int __b,
-                                            unsigned short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector pixel __a, int __b,
-                                            vector pixel *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector int __a, int __b,
-                                            vector int *__c) {
-  __builtin_altivec_stvxl(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector int __a, int __b, int *__c) {
-  __builtin_altivec_stvxl(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned int __a, int __b,
-                                            vector unsigned int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned int __a, int __b,
-                                            unsigned int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool int __a, int __b,
-                                            int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool int __a, int __b,
-                                            unsigned int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool int __a, int __b,
-                                            vector bool int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector float __a, int __b,
-                                            vector float *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector float __a, int __b,
-                                            float *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-/* vec_stvxl */
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector signed char __a, int __b,
-                                              vector signed char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector signed char __a, int __b,
-                                              signed char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned char __a, int __b,
-                                              vector unsigned char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned char __a, int __b,
-                                              unsigned char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool char __a, int __b,
-                                              signed char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool char __a, int __b,
-                                              unsigned char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool char __a, int __b,
-                                              vector bool char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector short __a, int __b,
-                                              vector short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector short __a, int __b,
-                                              short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned short __a,
-                                              int __b,
-                                              vector unsigned short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned short __a,
-                                              int __b, unsigned short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool short __a, int __b,
-                                              short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool short __a, int __b,
-                                              unsigned short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool short __a, int __b,
-                                              vector bool short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector pixel __a, int __b,
-                                              short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector pixel __a, int __b,
-                                              unsigned short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector pixel __a, int __b,
-                                              vector pixel *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector int __a, int __b,
-                                              vector int *__c) {
-  __builtin_altivec_stvxl(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector int __a, int __b,
-                                              int *__c) {
-  __builtin_altivec_stvxl(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned int __a, int __b,
-                                              vector unsigned int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned int __a, int __b,
-                                              unsigned int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool int __a, int __b,
-                                              int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool int __a, int __b,
-                                              unsigned int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool int __a, int __b,
-                                              vector bool int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector float __a, int __b,
-                                              vector float *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector float __a, int __b,
-                                              float *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-/* vec_sub */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sub(vector signed char __a, vector signed char __b) {
-  return __a - __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sub(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a - __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sub(vector signed char __a, vector bool char __b) {
-  return __a - (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sub(vector unsigned char __a, vector unsigned char __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sub(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a - __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sub(vector unsigned char __a, vector bool char __b) {
-  return __a - (vector unsigned char)__b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sub(vector short __a,
-                                                    vector short __b) {
-  return __a - __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sub(vector bool short __a,
-                                                    vector short __b) {
-  return (vector short)__a - __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sub(vector short __a,
-                                                    vector bool short __b) {
-  return __a - (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sub(vector unsigned short __a, vector unsigned short __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sub(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a - __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sub(vector unsigned short __a, vector bool short __b) {
-  return __a - (vector unsigned short)__b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sub(vector int __a,
-                                                  vector int __b) {
-  return __a - __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sub(vector bool int __a,
-                                                  vector int __b) {
-  return (vector int)__a - __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sub(vector int __a,
-                                                  vector bool int __b) {
-  return __a - (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sub(vector unsigned int __a, vector unsigned int __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sub(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a - __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sub(vector unsigned int __a, vector bool int __b) {
-  return __a - (vector unsigned int)__b;
-}
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_sub(vector signed __int128 __a, vector signed __int128 __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_sub(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __a - __b;
-}
-#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_sub(vector signed long long __a, vector signed long long __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sub(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a - __b;
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_sub(vector double __a,
-                                                     vector double __b) {
-  return __a - __b;
-}
-#endif
-
-static __inline__ vector float __ATTRS_o_ai vec_sub(vector float __a,
-                                                    vector float __b) {
-  return __a - __b;
-}
-
-/* vec_vsububm */
-
-#define __builtin_altivec_vsububm vec_vsububm
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsububm(vector signed char __a, vector signed char __b) {
-  return __a - __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsububm(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a - __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsububm(vector signed char __a, vector bool char __b) {
-  return __a - (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsububm(vector unsigned char __a, vector unsigned char __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsububm(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a - __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsububm(vector unsigned char __a, vector bool char __b) {
-  return __a - (vector unsigned char)__b;
-}
-
-/* vec_vsubuhm */
-
-#define __builtin_altivec_vsubuhm vec_vsubuhm
-
-static __inline__ vector short __ATTRS_o_ai vec_vsubuhm(vector short __a,
-                                                        vector short __b) {
-  return __a - __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsubuhm(vector bool short __a,
-                                                        vector short __b) {
-  return (vector short)__a - __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsubuhm(vector short __a,
-                                                        vector bool short __b) {
-  return __a - (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsubuhm(vector unsigned short __a, vector unsigned short __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsubuhm(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a - __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsubuhm(vector unsigned short __a, vector bool short __b) {
-  return __a - (vector unsigned short)__b;
-}
-
-/* vec_vsubuwm */
-
-#define __builtin_altivec_vsubuwm vec_vsubuwm
-
-static __inline__ vector int __ATTRS_o_ai vec_vsubuwm(vector int __a,
-                                                      vector int __b) {
-  return __a - __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsubuwm(vector bool int __a,
-                                                      vector int __b) {
-  return (vector int)__a - __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsubuwm(vector int __a,
-                                                      vector bool int __b) {
-  return __a - (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsubuwm(vector unsigned int __a, vector unsigned int __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsubuwm(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a - __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsubuwm(vector unsigned int __a, vector bool int __b) {
-  return __a - (vector unsigned int)__b;
-}
-
-/* vec_vsubfp */
-
-#define __builtin_altivec_vsubfp vec_vsubfp
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vsubfp(vector float __a, vector float __b) {
-  return __a - __b;
-}
-
-/* vec_subc */
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_subc(vector signed int __a, vector signed int __b) {
-  return (vector signed int)__builtin_altivec_vsubcuw((vector unsigned int)__a,
-                                                      (vector unsigned int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_subc(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vsubcuw(__a, __b);
-}
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_subc(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __builtin_altivec_vsubcuq(__a, __b);
-}
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_subc(vector signed __int128 __a, vector signed __int128 __b) {
-  return __builtin_altivec_vsubcuq(__a, __b);
-}
-#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-
-/* vec_vsubcuw */
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vsubcuw(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vsubcuw(__a, __b);
-}
-
-/* vec_subs */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_subs(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vsubsbs(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_subs(vector bool char __a, vector signed char __b) {
-  return __builtin_altivec_vsubsbs((vector signed char)__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_subs(vector signed char __a, vector bool char __b) {
-  return __builtin_altivec_vsubsbs(__a, (vector signed char)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_subs(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vsububs(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_subs(vector bool char __a, vector unsigned char __b) {
-  return __builtin_altivec_vsububs((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_subs(vector unsigned char __a, vector bool char __b) {
-  return __builtin_altivec_vsububs(__a, (vector unsigned char)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_subs(vector short __a,
-                                                     vector short __b) {
-  return __builtin_altivec_vsubshs(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_subs(vector bool short __a,
-                                                     vector short __b) {
-  return __builtin_altivec_vsubshs((vector short)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_subs(vector short __a,
-                                                     vector bool short __b) {
-  return __builtin_altivec_vsubshs(__a, (vector short)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_subs(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vsubuhs(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_subs(vector bool short __a, vector unsigned short __b) {
-  return __builtin_altivec_vsubuhs((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_subs(vector unsigned short __a, vector bool short __b) {
-  return __builtin_altivec_vsubuhs(__a, (vector unsigned short)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_subs(vector int __a,
-                                                   vector int __b) {
-  return __builtin_altivec_vsubsws(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_subs(vector bool int __a,
-                                                   vector int __b) {
-  return __builtin_altivec_vsubsws((vector int)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_subs(vector int __a,
-                                                   vector bool int __b) {
-  return __builtin_altivec_vsubsws(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_subs(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vsubuws(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_subs(vector bool int __a, vector unsigned int __b) {
-  return __builtin_altivec_vsubuws((vector unsigned int)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_subs(vector unsigned int __a, vector bool int __b) {
-  return __builtin_altivec_vsubuws(__a, (vector unsigned int)__b);
-}
-
-/* vec_vsubsbs */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsubsbs(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vsubsbs(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsubsbs(vector bool char __a, vector signed char __b) {
-  return __builtin_altivec_vsubsbs((vector signed char)__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsubsbs(vector signed char __a, vector bool char __b) {
-  return __builtin_altivec_vsubsbs(__a, (vector signed char)__b);
-}
-
-/* vec_vsububs */
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsububs(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vsububs(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsububs(vector bool char __a, vector unsigned char __b) {
-  return __builtin_altivec_vsububs((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsububs(vector unsigned char __a, vector bool char __b) {
-  return __builtin_altivec_vsububs(__a, (vector unsigned char)__b);
-}
-
-/* vec_vsubshs */
-
-static __inline__ vector short __ATTRS_o_ai vec_vsubshs(vector short __a,
-                                                        vector short __b) {
-  return __builtin_altivec_vsubshs(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsubshs(vector bool short __a,
-                                                        vector short __b) {
-  return __builtin_altivec_vsubshs((vector short)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsubshs(vector short __a,
-                                                        vector bool short __b) {
-  return __builtin_altivec_vsubshs(__a, (vector short)__b);
-}
-
-/* vec_vsubuhs */
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsubuhs(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vsubuhs(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsubuhs(vector bool short __a, vector unsigned short __b) {
-  return __builtin_altivec_vsubuhs((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsubuhs(vector unsigned short __a, vector bool short __b) {
-  return __builtin_altivec_vsubuhs(__a, (vector unsigned short)__b);
-}
-
-/* vec_vsubsws */
-
-static __inline__ vector int __ATTRS_o_ai vec_vsubsws(vector int __a,
-                                                      vector int __b) {
-  return __builtin_altivec_vsubsws(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsubsws(vector bool int __a,
-                                                      vector int __b) {
-  return __builtin_altivec_vsubsws((vector int)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsubsws(vector int __a,
-                                                      vector bool int __b) {
-  return __builtin_altivec_vsubsws(__a, (vector int)__b);
-}
-
-/* vec_vsubuws */
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsubuws(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vsubuws(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsubuws(vector bool int __a, vector unsigned int __b) {
-  return __builtin_altivec_vsubuws((vector unsigned int)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsubuws(vector unsigned int __a, vector bool int __b) {
-  return __builtin_altivec_vsubuws(__a, (vector unsigned int)__b);
-}
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-/* vec_vsubuqm */
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_vsubuqm(vector signed __int128 __a, vector signed __int128 __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_vsubuqm(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __a - __b;
-}
-
-/* vec_vsubeuqm */
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_vsubeuqm(vector signed __int128 __a, vector signed __int128 __b,
-             vector signed __int128 __c) {
-  return __builtin_altivec_vsubeuqm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_vsubeuqm(vector unsigned __int128 __a, vector unsigned __int128 __b,
-             vector unsigned __int128 __c) {
-  return __builtin_altivec_vsubeuqm(__a, __b, __c);
-}
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_sube(vector signed __int128 __a, vector signed __int128 __b,
-         vector signed __int128 __c) {
-  return __builtin_altivec_vsubeuqm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_sube(vector unsigned __int128 __a, vector unsigned __int128 __b,
-         vector unsigned __int128 __c) {
-  return __builtin_altivec_vsubeuqm(__a, __b, __c);
-}
-
-/* vec_vsubcuq */
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_vsubcuq(vector signed __int128 __a, vector signed __int128 __b) {
-  return __builtin_altivec_vsubcuq(__a, __b);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_vsubcuq(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __builtin_altivec_vsubcuq(__a, __b);
-}
-
-/* vec_vsubecuq */
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_vsubecuq(vector signed __int128 __a, vector signed __int128 __b,
-             vector signed __int128 __c) {
-  return __builtin_altivec_vsubecuq(__a, __b, __c);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_vsubecuq(vector unsigned __int128 __a, vector unsigned __int128 __b,
-             vector unsigned __int128 __c) {
-  return __builtin_altivec_vsubecuq(__a, __b, __c);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_subec(vector signed int __a, vector signed int __b,
-          vector signed int __c) {
-  return vec_addec(__a, ~__b, __c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_subec(vector unsigned int __a, vector unsigned int __b,
-          vector unsigned int __c) {
-  return vec_addec(__a, ~__b, __c);
-}
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_subec(vector signed __int128 __a, vector signed __int128 __b,
-          vector signed __int128 __c) {
-  return __builtin_altivec_vsubecuq(__a, __b, __c);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_subec(vector unsigned __int128 __a, vector unsigned __int128 __b,
-          vector unsigned __int128 __c) {
-  return __builtin_altivec_vsubecuq(__a, __b, __c);
-}
-#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_sube(vector signed int __a, vector signed int __b,
-         vector signed int __c) {
-  vector signed int __mask = {1, 1, 1, 1};
-  vector signed int __carry = __c & __mask;
-  return vec_adde(__a, ~__b, __carry);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sube(vector unsigned int __a, vector unsigned int __b,
-         vector unsigned int __c) {
-  vector unsigned int __mask = {1, 1, 1, 1};
-  vector unsigned int __carry = __c & __mask;
-  return vec_adde(__a, ~__b, __carry);
-}
-
-/* vec_sum4s */
-
-static __inline__ vector int __ATTRS_o_ai vec_sum4s(vector signed char __a,
-                                                    vector int __b) {
-  return __builtin_altivec_vsum4sbs(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sum4s(vector unsigned char __a, vector unsigned int __b) {
-  return __builtin_altivec_vsum4ubs(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sum4s(vector signed short __a,
-                                                    vector int __b) {
-  return __builtin_altivec_vsum4shs(__a, __b);
-}
-
-/* vec_vsum4sbs */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vsum4sbs(vector signed char __a, vector int __b) {
-  return __builtin_altivec_vsum4sbs(__a, __b);
-}
-
-/* vec_vsum4ubs */
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vsum4ubs(vector unsigned char __a, vector unsigned int __b) {
-  return __builtin_altivec_vsum4ubs(__a, __b);
-}
-
-/* vec_vsum4shs */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vsum4shs(vector signed short __a, vector int __b) {
-  return __builtin_altivec_vsum4shs(__a, __b);
-}
-
-/* vec_sum2s */
-
-/* The vsum2sws instruction has a big-endian bias, so that the second
-   input vector and the result always reference big-endian elements
-   1 and 3 (little-endian elements 0 and 2).  For ease of porting, the
-   programmer wants elements 1 and 3 in both cases, so for little
-   endian we must perform some permutes.  */
-
-static __inline__ vector signed int __attribute__((__always_inline__))
-vec_sum2s(vector int __a, vector int __b) {
-#ifdef __LITTLE_ENDIAN__
-  vector int __c = (vector signed int)vec_perm(
-      __b, __b, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15,
-                                       8, 9, 10, 11));
-  __c = __builtin_altivec_vsum2sws(__a, __c);
-  return (vector signed int)vec_perm(
-      __c, __c, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15,
-                                       8, 9, 10, 11));
-#else
-  return __builtin_altivec_vsum2sws(__a, __b);
-#endif
-}
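
/* Editorial illustration, not part of the original header: a minimal,
   hypothetical usage sketch of vec_sum2s.  The helper name and the
   literal values below are assumptions chosen only for demonstration.
   With __va = {1, 2, 3, 4} and __vb = {10, 20, 30, 40}, element 1 of
   the result holds (1 + 2) + 20 = 23 and element 3 holds
   (3 + 4) + 40 = 47; elements 0 and 2 are cleared.  Because of the
   permutes above, the same element positions are produced on both
   endiannesses. */
static __inline__ vector signed int demo_vec_sum2s(void) {
  vector signed int __va = {1, 2, 3, 4};
  vector signed int __vb = {10, 20, 30, 40};
  return vec_sum2s(__va, __vb); /* {0, 23, 0, 47} */
}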
-
-/* vec_vsum2sws */
-
-static __inline__ vector signed int __attribute__((__always_inline__))
-vec_vsum2sws(vector int __a, vector int __b) {
-#ifdef __LITTLE_ENDIAN__
-  vector int __c = (vector signed int)vec_perm(
-      __b, __b, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15,
-                                       8, 9, 10, 11));
-  __c = __builtin_altivec_vsum2sws(__a, __c);
-  return (vector signed int)vec_perm(
-      __c, __c, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15,
-                                       8, 9, 10, 11));
-#else
-  return __builtin_altivec_vsum2sws(__a, __b);
-#endif
-}
-
-/* vec_sums */
-
-/* The vsumsws instruction has a big-endian bias, so that the second
-   input vector and the result always reference big-endian element 3
-   (little-endian element 0).  For ease of porting, the programmer
-   wants element 3 in both cases, so for little endian we must perform
-   some permutes.  */
-
-static __inline__ vector signed int __attribute__((__always_inline__))
-vec_sums(vector signed int __a, vector signed int __b) {
-#ifdef __LITTLE_ENDIAN__
-  __b = (vector signed int)vec_splat(__b, 3);
-  __b = __builtin_altivec_vsumsws(__a, __b);
-  return (vector signed int)(0, 0, 0, __b[0]);
-#else
-  return __builtin_altivec_vsumsws(__a, __b);
-#endif
-}
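
/* Editorial illustration, not part of the original header: a minimal,
   hypothetical usage sketch of vec_sums.  The helper name and the
   literal values are assumptions for demonstration only.  The
   saturated sum of all four elements of __va (1 + 2 + 3 + 4 = 10)
   plus __vb[3] (10) is placed in element 3 and the remaining elements
   are cleared, on either endianness. */
static __inline__ vector signed int demo_vec_sums(void) {
  vector signed int __va = {1, 2, 3, 4};
  vector signed int __vb = {0, 0, 0, 10};
  return vec_sums(__va, __vb); /* {0, 0, 0, 20} */
}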
-
-/* vec_vsumsws */
-
-static __inline__ vector signed int __attribute__((__always_inline__))
-vec_vsumsws(vector signed int __a, vector signed int __b) {
-#ifdef __LITTLE_ENDIAN__
-  __b = (vector signed int)vec_splat(__b, 3);
-  __b = __builtin_altivec_vsumsws(__a, __b);
-  return (vector signed int)(0, 0, 0, __b[0]);
-#else
-  return __builtin_altivec_vsumsws(__a, __b);
-#endif
-}
-
-/* vec_trunc */
-
-static __inline__ vector float __ATTRS_o_ai vec_trunc(vector float __a) {
-#ifdef __VSX__
-  return __builtin_vsx_xvrspiz(__a);
-#else
-  return __builtin_altivec_vrfiz(__a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_trunc(vector double __a) {
-  return __builtin_vsx_xvrdpiz(__a);
-}
-#endif
-
-/* vec_vrfiz */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vrfiz(vector float __a) {
-  return __builtin_altivec_vrfiz(__a);
-}
-
-/* vec_unpackh */
-
-/* The vector unpack instructions all have a big-endian bias, so for
-   little endian we must reverse the meanings of "high" and "low."  */
-
-static __inline__ vector short __ATTRS_o_ai
-vec_unpackh(vector signed char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupklsb((vector char)__a);
-#else
-  return __builtin_altivec_vupkhsb((vector char)__a);
-#endif
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_unpackh(vector bool char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
-#else
-  return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
-#endif
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_unpackh(vector short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupklsh(__a);
-#else
-  return __builtin_altivec_vupkhsh(__a);
-#endif
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_unpackh(vector bool short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
-#else
-  return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_unpackh(vector pixel __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
-#else
-  return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
-#endif
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector long long __ATTRS_o_ai vec_unpackh(vector int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupklsw(__a);
-#else
-  return __builtin_altivec_vupkhsw(__a);
-#endif
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_unpackh(vector bool int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a);
-#else
-  return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a);
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_unpackh(vector float __a) {
-  return (vector double)(__a[0], __a[1]);
-}
-#endif
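
/* Editorial illustration, not part of the original header: a minimal,
   hypothetical sketch of vec_unpackh on a vector of signed chars.  The
   helper name and literal values are assumptions for demonstration
   (elements not listed in the initializer default to 0).  Thanks to
   the high/low swap above, vec_unpackh returns elements 0..7
   sign-extended to shorts on either endianness (so the -1 below stays
   -1), while vec_unpackl, defined further down, returns elements
   8..15. */
static __inline__ vector short demo_vec_unpackh(void) {
  vector signed char __vc = {-1, 2, 3, 4, 5, 6, 7, 8};
  return vec_unpackh(__vc); /* {-1, 2, 3, 4, 5, 6, 7, 8} */
}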
-
-/* vec_vupkhsb */
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vupkhsb(vector signed char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupklsb((vector char)__a);
-#else
-  return __builtin_altivec_vupkhsb((vector char)__a);
-#endif
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vupkhsb(vector bool char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
-#else
-  return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
-#endif
-}
-
-/* vec_vupkhsh */
-
-static __inline__ vector int __ATTRS_o_ai vec_vupkhsh(vector short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupklsh(__a);
-#else
-  return __builtin_altivec_vupkhsh(__a);
-#endif
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vupkhsh(vector bool short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
-#else
-  return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vupkhsh(vector pixel __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
-#else
-  return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
-#endif
-}
-
-/* vec_vupkhsw */
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector long long __ATTRS_o_ai vec_vupkhsw(vector int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupklsw(__a);
-#else
-  return __builtin_altivec_vupkhsw(__a);
-#endif
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_vupkhsw(vector bool int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a);
-#else
-  return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a);
-#endif
-}
-#endif
-
-/* vec_unpackl */
-
-static __inline__ vector short __ATTRS_o_ai
-vec_unpackl(vector signed char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupkhsb((vector char)__a);
-#else
-  return __builtin_altivec_vupklsb((vector char)__a);
-#endif
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_unpackl(vector bool char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
-#else
-  return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
-#endif
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_unpackl(vector short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupkhsh(__a);
-#else
-  return __builtin_altivec_vupklsh(__a);
-#endif
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_unpackl(vector bool short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
-#else
-  return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_unpackl(vector pixel __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
-#else
-  return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
-#endif
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector long long __ATTRS_o_ai vec_unpackl(vector int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupkhsw(__a);
-#else
-  return __builtin_altivec_vupklsw(__a);
-#endif
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_unpackl(vector bool int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a);
-#else
-  return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a);
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_unpackl(vector float __a) {
-  return (vector double)(__a[2], __a[3]);
-}
-#endif
-
-/* vec_vupklsb */
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vupklsb(vector signed char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupkhsb((vector char)__a);
-#else
-  return __builtin_altivec_vupklsb((vector char)__a);
-#endif
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vupklsb(vector bool char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
-#else
-  return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
-#endif
-}
-
-/* vec_vupklsh */
-
-static __inline__ vector int __ATTRS_o_ai vec_vupklsh(vector short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupkhsh(__a);
-#else
-  return __builtin_altivec_vupklsh(__a);
-#endif
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vupklsh(vector bool short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
-#else
-  return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vupklsh(vector pixel __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
-#else
-  return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
-#endif
-}
-
-/* vec_vupklsw */
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector long long __ATTRS_o_ai vec_vupklsw(vector int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupkhsw(__a);
-#else
-  return __builtin_altivec_vupklsw(__a);
-#endif
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_vupklsw(vector bool int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a);
-#else
-  return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a);
-#endif
-}
-#endif
-
-/* vec_vsx_ld */
-
-#ifdef __VSX__
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector bool int *__b) {
-  return (vector bool int)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector signed int *__b) {
-  return (vector signed int)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_vsx_ld(int __a, const signed int *__b) {
-  return (vector signed int)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector unsigned int *__b) {
-  return (vector unsigned int)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsx_ld(int __a, const unsigned int *__b) {
-  return (vector unsigned int)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector float *__b) {
-  return (vector float)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vsx_ld(int __a,
-                                                       const float *__b) {
-  return (vector float)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector signed long long *__b) {
-  return (vector signed long long)__builtin_vsx_lxvd2x(__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector unsigned long long *__b) {
-  return (vector unsigned long long)__builtin_vsx_lxvd2x(__a, __b);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector double *__b) {
-  return (vector double)__builtin_vsx_lxvd2x(__a, __b);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_vsx_ld(int __a, const double *__b) {
-  return (vector double)__builtin_vsx_lxvd2x(__a, __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector bool short *__b) {
-  return (vector bool short)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector signed short *__b) {
-  return (vector signed short)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_vsx_ld(int __a, const signed short *__b) {
-  return (vector signed short)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector unsigned short *__b) {
-  return (vector unsigned short)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsx_ld(int __a, const unsigned short *__b) {
-  return (vector unsigned short)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector bool char *__b) {
-  return (vector bool char)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector signed char *__b) {
-  return (vector signed char)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsx_ld(int __a, const signed char *__b) {
-  return (vector signed char)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector unsigned char *__b) {
-  return (vector unsigned char)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsx_ld(int __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-#endif
-
-/* vec_vsx_st */
-
-#ifdef __VSX__
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool int __a, int __b,
-                                               vector bool int *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool int __a, int __b,
-                                               signed int *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool int __a, int __b,
-                                               unsigned int *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed int __a, int __b,
-                                               vector signed int *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed int __a, int __b,
-                                               signed int *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned int __a, int __b,
-                                               vector unsigned int *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned int __a, int __b,
-                                               unsigned int *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector float __a, int __b,
-                                               vector float *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector float __a, int __b,
-                                               float *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed long long __a,
-                                               int __b,
-                                               vector signed long long *__c) {
-  __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned long long __a,
-                                               int __b,
-                                               vector unsigned long long *__c) {
-  __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector double __a, int __b,
-                                               vector double *__c) {
-  __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector double __a, int __b,
-                                               double *__c) {
-  __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool short __a, int __b,
-                                               vector bool short *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool short __a, int __b,
-                                               signed short *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool short __a, int __b,
-                                               unsigned short *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed short __a, int __b,
-                                               vector signed short *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed short __a, int __b,
-                                               signed short *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned short __a,
-                                               int __b,
-                                               vector unsigned short *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned short __a,
-                                               int __b, unsigned short *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool char __a, int __b,
-                                               vector bool char *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool char __a, int __b,
-                                               signed char *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool char __a, int __b,
-                                               unsigned char *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed char __a, int __b,
-                                               vector signed char *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed char __a, int __b,
-                                               signed char *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned char __a,
-                                               int __b,
-                                               vector unsigned char *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned char __a,
-                                               int __b, unsigned char *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-#endif
-
-#ifdef __VSX__
-#define vec_xxpermdi __builtin_vsx_xxpermdi
-#define vec_xxsldwi __builtin_vsx_xxsldwi
-#endif
-
-/* vec_xor */
-
-#define __builtin_altivec_vxor vec_xor
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_xor(vector signed char __a, vector signed char __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_xor(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a ^ __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_xor(vector signed char __a, vector bool char __b) {
-  return __a ^ (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xor(vector unsigned char __a, vector unsigned char __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xor(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a ^ __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xor(vector unsigned char __a, vector bool char __b) {
-  return __a ^ (vector unsigned char)__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_xor(vector bool char __a,
-                                                        vector bool char __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_xor(vector short __a,
-                                                    vector short __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_xor(vector bool short __a,
-                                                    vector short __b) {
-  return (vector short)__a ^ __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_xor(vector short __a,
-                                                    vector bool short __b) {
-  return __a ^ (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_xor(vector unsigned short __a, vector unsigned short __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_xor(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a ^ __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_xor(vector unsigned short __a, vector bool short __b) {
-  return __a ^ (vector unsigned short)__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_xor(vector bool short __a, vector bool short __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_xor(vector int __a,
-                                                  vector int __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_xor(vector bool int __a,
-                                                  vector int __b) {
-  return (vector int)__a ^ __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_xor(vector int __a,
-                                                  vector bool int __b) {
-  return __a ^ (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_xor(vector unsigned int __a, vector unsigned int __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_xor(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a ^ __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_xor(vector unsigned int __a, vector bool int __b) {
-  return __a ^ (vector unsigned int)__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_xor(vector bool int __a,
-                                                       vector bool int __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_xor(vector float __a,
-                                                    vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a ^ (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_xor(vector bool int __a,
-                                                    vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a ^ (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_xor(vector float __a,
-                                                    vector bool int __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a ^ (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_xor(vector signed long long __a, vector signed long long __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_xor(vector bool long long __a, vector signed long long __b) {
-  return (vector signed long long)__a ^ __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_xor(vector signed long long __a, vector bool long long __b) {
-  return __a ^ (vector signed long long)__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_xor(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_xor(vector bool long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__a ^ __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_xor(vector unsigned long long __a, vector bool long long __b) {
-  return __a ^ (vector unsigned long long)__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_xor(vector bool long long __a, vector bool long long __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_xor(vector double __a,
-                                                     vector double __b) {
-  return (vector double)((vector unsigned long long)__a ^
-                         (vector unsigned long long)__b);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_xor(vector double __a, vector bool long long __b) {
-  return (vector double)((vector unsigned long long)__a ^
-                         (vector unsigned long long)__b);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_xor(vector bool long long __a,
-                                                     vector double __b) {
-  return (vector double)((vector unsigned long long)__a ^
-                         (vector unsigned long long)__b);
-}
-#endif
-
-/* vec_vxor */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vxor(vector signed char __a, vector signed char __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vxor(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a ^ __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vxor(vector signed char __a, vector bool char __b) {
-  return __a ^ (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vxor(vector unsigned char __a, vector unsigned char __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vxor(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a ^ __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vxor(vector unsigned char __a, vector bool char __b) {
-  return __a ^ (vector unsigned char)__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_vxor(vector bool char __a,
-                                                         vector bool char __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vxor(vector short __a,
-                                                     vector short __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vxor(vector bool short __a,
-                                                     vector short __b) {
-  return (vector short)__a ^ __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vxor(vector short __a,
-                                                     vector bool short __b) {
-  return __a ^ (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vxor(vector unsigned short __a, vector unsigned short __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vxor(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a ^ __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vxor(vector unsigned short __a, vector bool short __b) {
-  return __a ^ (vector unsigned short)__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vxor(vector bool short __a, vector bool short __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vxor(vector int __a,
-                                                   vector int __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vxor(vector bool int __a,
-                                                   vector int __b) {
-  return (vector int)__a ^ __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vxor(vector int __a,
-                                                   vector bool int __b) {
-  return __a ^ (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vxor(vector unsigned int __a, vector unsigned int __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vxor(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a ^ __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vxor(vector unsigned int __a, vector bool int __b) {
-  return __a ^ (vector unsigned int)__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vxor(vector bool int __a,
-                                                        vector bool int __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vxor(vector float __a,
-                                                     vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a ^ (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vxor(vector bool int __a,
-                                                     vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a ^ (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vxor(vector float __a,
-                                                     vector bool int __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a ^ (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vxor(vector signed long long __a, vector signed long long __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vxor(vector bool long long __a, vector signed long long __b) {
-  return (vector signed long long)__a ^ __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vxor(vector signed long long __a, vector bool long long __b) {
-  return __a ^ (vector signed long long)__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vxor(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vxor(vector bool long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__a ^ __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vxor(vector unsigned long long __a, vector bool long long __b) {
-  return __a ^ (vector unsigned long long)__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_vxor(vector bool long long __a, vector bool long long __b) {
-  return __a ^ __b;
-}
-#endif
-
-/* ------------------------ extensions for CBEA ----------------------------- */
-
-/* vec_extract */
-
-static __inline__ signed char __ATTRS_o_ai vec_extract(vector signed char __a,
-                                                       int __b) {
-  return __a[__b];
-}
-
-static __inline__ unsigned char __ATTRS_o_ai
-vec_extract(vector unsigned char __a, int __b) {
-  return __a[__b];
-}
-
-static __inline__ unsigned char __ATTRS_o_ai vec_extract(vector bool char __a,
-                                                         int __b) {
-  return __a[__b];
-}
-
-static __inline__ signed short __ATTRS_o_ai vec_extract(vector signed short __a,
-                                                        int __b) {
-  return __a[__b];
-}
-
-static __inline__ unsigned short __ATTRS_o_ai
-vec_extract(vector unsigned short __a, int __b) {
-  return __a[__b];
-}
-
-static __inline__ unsigned short __ATTRS_o_ai vec_extract(vector bool short __a,
-                                                          int __b) {
-  return __a[__b];
-}
-
-static __inline__ signed int __ATTRS_o_ai vec_extract(vector signed int __a,
-                                                      int __b) {
-  return __a[__b];
-}
-
-static __inline__ unsigned int __ATTRS_o_ai vec_extract(vector unsigned int __a,
-                                                        int __b) {
-  return __a[__b];
-}
-
-static __inline__ unsigned int __ATTRS_o_ai vec_extract(vector bool int __a,
-                                                        int __b) {
-  return __a[__b];
-}
-
-#ifdef __VSX__
-static __inline__ signed long long __ATTRS_o_ai
-vec_extract(vector signed long long __a, int __b) {
-  return __a[__b];
-}
-
-static __inline__ unsigned long long __ATTRS_o_ai
-vec_extract(vector unsigned long long __a, int __b) {
-  return __a[__b];
-}
-
-static __inline__ unsigned long long __ATTRS_o_ai
-vec_extract(vector bool long long __a, int __b) {
-  return __a[__b];
-}
-
-static __inline__ double __ATTRS_o_ai vec_extract(vector double __a, int __b) {
-  return __a[__b];
-}
-#endif
-
-static __inline__ float __ATTRS_o_ai vec_extract(vector float __a, int __b) {
-  return __a[__b];
-}
-
-#ifdef __POWER9_VECTOR__
-
-#define vec_insert4b __builtin_vsx_insertword
-#define vec_extract4b __builtin_vsx_extractuword
-
-/* vec_extract_exp */
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_extract_exp(vector float __a) {
-  return __builtin_vsx_xvxexpsp(__a);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_extract_exp(vector double __a) {
-  return __builtin_vsx_xvxexpdp(__a);
-}
-
-/* vec_extract_sig */
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_extract_sig(vector float __a) {
-  return __builtin_vsx_xvxsigsp(__a);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_extract_sig(vector double __a) {
-  return __builtin_vsx_xvxsigdp(__a);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_extract_fp32_from_shorth(vector unsigned short __a) {
-  vector unsigned short __b =
-#ifdef __LITTLE_ENDIAN__
-            __builtin_shufflevector(__a, __a, 0, -1, 1, -1, 2, -1, 3, -1);
-#else
-            __builtin_shufflevector(__a, __a, -1, 0, -1, 1, -1, 2, -1, 3);
-#endif
-  return __builtin_vsx_xvcvhpsp(__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_extract_fp32_from_shortl(vector unsigned short __a) {
-  vector unsigned short __b =
-#ifdef __LITTLE_ENDIAN__
-            __builtin_shufflevector(__a, __a, 4, -1, 5, -1, 6, -1, 7, -1);
-#else
-            __builtin_shufflevector(__a, __a, -1, 4, -1, 5, -1, 6, -1, 7);
-#endif
-  return __builtin_vsx_xvcvhpsp(__b);
-}
-#endif /* __POWER9_VECTOR__ */
-
-/* vec_insert */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_insert(signed char __a, vector signed char __b, int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_insert(unsigned char __a, vector unsigned char __b, int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_insert(unsigned char __a,
-                                                           vector bool char __b,
-                                                           int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_insert(signed short __a, vector signed short __b, int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_insert(unsigned short __a, vector unsigned short __b, int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_insert(unsigned short __a, vector bool short __b, int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_insert(signed int __a, vector signed int __b, int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_insert(unsigned int __a, vector unsigned int __b, int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_insert(unsigned int __a,
-                                                          vector bool int __b,
-                                                          int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_insert(signed long long __a, vector signed long long __b, int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_insert(unsigned long long __a, vector unsigned long long __b, int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_insert(unsigned long long __a, vector bool long long __b, int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_insert(double __a,
-                                                        vector double __b,
-                                                        int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-#endif
-
-static __inline__ vector float __ATTRS_o_ai vec_insert(float __a,
-                                                       vector float __b,
-                                                       int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-/* vec_lvlx */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvlx(int __a, const signed char *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector signed char)(0),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvlx(int __a, const vector signed char *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector signed char)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvlx(int __a, const unsigned char *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector unsigned char)(0),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvlx(int __a, const vector unsigned char *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector unsigned char)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_lvlx(int __a, const vector bool char *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector bool char)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvlx(int __a,
-                                                     const short *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector short)(0), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvlx(int __a,
-                                                     const vector short *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector short)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvlx(int __a, const unsigned short *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector unsigned short)(0),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvlx(int __a, const vector unsigned short *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector unsigned short)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_lvlx(int __a, const vector bool short *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector bool short)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_lvlx(int __a,
-                                                     const vector pixel *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector pixel)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvlx(int __a, const int *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector int)(0), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvlx(int __a,
-                                                   const vector int *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector int)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvlx(int __a, const unsigned int *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector unsigned int)(0),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvlx(int __a, const vector unsigned int *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector unsigned int)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_lvlx(int __a, const vector bool int *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector bool int)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvlx(int __a,
-                                                     const float *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector float)(0), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvlx(int __a,
-                                                     const vector float *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector float)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-/* vec_lvlxl */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvlxl(int __a, const signed char *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector signed char)(0),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvlxl(int __a, const vector signed char *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector signed char)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvlxl(int __a, const unsigned char *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector unsigned char)(0),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvlxl(int __a, const vector unsigned char *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector unsigned char)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_lvlxl(int __a, const vector bool char *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector bool char)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvlxl(int __a,
-                                                      const short *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector short)(0), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvlxl(int __a,
-                                                      const vector short *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector short)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvlxl(int __a, const unsigned short *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector unsigned short)(0),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvlxl(int __a, const vector unsigned short *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector unsigned short)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_lvlxl(int __a, const vector bool short *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector bool short)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_lvlxl(int __a,
-                                                      const vector pixel *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector pixel)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvlxl(int __a, const int *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector int)(0), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvlxl(int __a,
-                                                    const vector int *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector int)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvlxl(int __a, const unsigned int *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector unsigned int)(0),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvlxl(int __a, const vector unsigned int *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector unsigned int)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_lvlxl(int __a, const vector bool int *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector bool int)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvlxl(int __a,
-                                                      const float *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector float)(0), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvlxl(int __a,
-                                                      vector float *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector float)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-/* vec_lvrx */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvrx(int __a, const signed char *__b) {
-  return vec_perm((vector signed char)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvrx(int __a, const vector signed char *__b) {
-  return vec_perm((vector signed char)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvrx(int __a, const unsigned char *__b) {
-  return vec_perm((vector unsigned char)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvrx(int __a, const vector unsigned char *__b) {
-  return vec_perm((vector unsigned char)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_lvrx(int __a, const vector bool char *__b) {
-  return vec_perm((vector bool char)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvrx(int __a,
-                                                     const short *__b) {
-  return vec_perm((vector short)(0), vec_ld(__a, __b), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvrx(int __a,
-                                                     const vector short *__b) {
-  return vec_perm((vector short)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvrx(int __a, const unsigned short *__b) {
-  return vec_perm((vector unsigned short)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvrx(int __a, const vector unsigned short *__b) {
-  return vec_perm((vector unsigned short)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_lvrx(int __a, const vector bool short *__b) {
-  return vec_perm((vector bool short)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_lvrx(int __a,
-                                                     const vector pixel *__b) {
-  return vec_perm((vector pixel)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvrx(int __a, const int *__b) {
-  return vec_perm((vector int)(0), vec_ld(__a, __b), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvrx(int __a,
-                                                   const vector int *__b) {
-  return vec_perm((vector int)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvrx(int __a, const unsigned int *__b) {
-  return vec_perm((vector unsigned int)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvrx(int __a, const vector unsigned int *__b) {
-  return vec_perm((vector unsigned int)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_lvrx(int __a, const vector bool int *__b) {
-  return vec_perm((vector bool int)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvrx(int __a,
-                                                     const float *__b) {
-  return vec_perm((vector float)(0), vec_ld(__a, __b), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvrx(int __a,
-                                                     const vector float *__b) {
-  return vec_perm((vector float)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-/* vec_lvrxl */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvrxl(int __a, const signed char *__b) {
-  return vec_perm((vector signed char)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvrxl(int __a, const vector signed char *__b) {
-  return vec_perm((vector signed char)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvrxl(int __a, const unsigned char *__b) {
-  return vec_perm((vector unsigned char)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvrxl(int __a, const vector unsigned char *__b) {
-  return vec_perm((vector unsigned char)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_lvrxl(int __a, const vector bool char *__b) {
-  return vec_perm((vector bool char)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvrxl(int __a,
-                                                      const short *__b) {
-  return vec_perm((vector short)(0), vec_ldl(__a, __b), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvrxl(int __a,
-                                                      const vector short *__b) {
-  return vec_perm((vector short)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvrxl(int __a, const unsigned short *__b) {
-  return vec_perm((vector unsigned short)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvrxl(int __a, const vector unsigned short *__b) {
-  return vec_perm((vector unsigned short)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_lvrxl(int __a, const vector bool short *__b) {
-  return vec_perm((vector bool short)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_lvrxl(int __a,
-                                                      const vector pixel *__b) {
-  return vec_perm((vector pixel)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvrxl(int __a, const int *__b) {
-  return vec_perm((vector int)(0), vec_ldl(__a, __b), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvrxl(int __a,
-                                                    const vector int *__b) {
-  return vec_perm((vector int)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvrxl(int __a, const unsigned int *__b) {
-  return vec_perm((vector unsigned int)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvrxl(int __a, const vector unsigned int *__b) {
-  return vec_perm((vector unsigned int)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_lvrxl(int __a, const vector bool int *__b) {
-  return vec_perm((vector bool int)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvrxl(int __a,
-                                                      const float *__b) {
-  return vec_perm((vector float)(0), vec_ldl(__a, __b), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvrxl(int __a,
-                                                      const vector float *__b) {
-  return vec_perm((vector float)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-/* vec_stvlx */
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector signed char __a, int __b,
-                                              signed char *__c) {
-  return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector signed char __a, int __b,
-                                              vector signed char *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned char __a, int __b,
-                                              unsigned char *__c) {
-  return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned char __a, int __b,
-                                              vector unsigned char *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector bool char __a, int __b,
-                                              vector bool char *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector short __a, int __b,
-                                              short *__c) {
-  return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector short __a, int __b,
-                                              vector short *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned short __a,
-                                              int __b, unsigned short *__c) {
-  return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned short __a,
-                                              int __b,
-                                              vector unsigned short *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector bool short __a, int __b,
-                                              vector bool short *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector pixel __a, int __b,
-                                              vector pixel *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector int __a, int __b,
-                                              int *__c) {
-  return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector int __a, int __b,
-                                              vector int *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned int __a, int __b,
-                                              unsigned int *__c) {
-  return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned int __a, int __b,
-                                              vector unsigned int *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector bool int __a, int __b,
-                                              vector bool int *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector float __a, int __b,
-                                              vector float *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-/* vec_stvlxl */
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector signed char __a, int __b,
-                                               signed char *__c) {
-  return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector signed char __a, int __b,
-                                               vector signed char *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned char __a,
-                                               int __b, unsigned char *__c) {
-  return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned char __a,
-                                               int __b,
-                                               vector unsigned char *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector bool char __a, int __b,
-                                               vector bool char *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector short __a, int __b,
-                                               short *__c) {
-  return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector short __a, int __b,
-                                               vector short *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned short __a,
-                                               int __b, unsigned short *__c) {
-  return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned short __a,
-                                               int __b,
-                                               vector unsigned short *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector bool short __a, int __b,
-                                               vector bool short *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector pixel __a, int __b,
-                                               vector pixel *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector int __a, int __b,
-                                               int *__c) {
-  return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector int __a, int __b,
-                                               vector int *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned int __a, int __b,
-                                               unsigned int *__c) {
-  return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned int __a, int __b,
-                                               vector unsigned int *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector bool int __a, int __b,
-                                               vector bool int *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector float __a, int __b,
-                                               vector float *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-/* vec_stvrx */
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector signed char __a, int __b,
-                                              signed char *__c) {
-  return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector signed char __a, int __b,
-                                              vector signed char *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned char __a, int __b,
-                                              unsigned char *__c) {
-  return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned char __a, int __b,
-                                              vector unsigned char *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector bool char __a, int __b,
-                                              vector bool char *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector short __a, int __b,
-                                              short *__c) {
-  return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector short __a, int __b,
-                                              vector short *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned short __a,
-                                              int __b, unsigned short *__c) {
-  return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned short __a,
-                                              int __b,
-                                              vector unsigned short *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector bool short __a, int __b,
-                                              vector bool short *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector pixel __a, int __b,
-                                              vector pixel *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector int __a, int __b,
-                                              int *__c) {
-  return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector int __a, int __b,
-                                              vector int *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned int __a, int __b,
-                                              unsigned int *__c) {
-  return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned int __a, int __b,
-                                              vector unsigned int *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector bool int __a, int __b,
-                                              vector bool int *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector float __a, int __b,
-                                              vector float *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-/* vec_stvrxl */
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector signed char __a, int __b,
-                                               signed char *__c) {
-  return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector signed char __a, int __b,
-                                               vector signed char *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned char __a,
-                                               int __b, unsigned char *__c) {
-  return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned char __a,
-                                               int __b,
-                                               vector unsigned char *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector bool char __a, int __b,
-                                               vector bool char *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector short __a, int __b,
-                                               short *__c) {
-  return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector short __a, int __b,
-                                               vector short *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned short __a,
-                                               int __b, unsigned short *__c) {
-  return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned short __a,
-                                               int __b,
-                                               vector unsigned short *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector bool short __a, int __b,
-                                               vector bool short *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector pixel __a, int __b,
-                                               vector pixel *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector int __a, int __b,
-                                               int *__c) {
-  return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector int __a, int __b,
-                                               vector int *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned int __a, int __b,
-                                               unsigned int *__c) {
-  return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned int __a, int __b,
-                                               vector unsigned int *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector bool int __a, int __b,
-                                               vector bool int *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector float __a, int __b,
-                                               vector float *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-/* vec_promote */
-
-static __inline__ vector signed char __ATTRS_o_ai vec_promote(signed char __a,
-                                                              int __b) {
-  vector signed char __res = (vector signed char)(0);
-  __res[__b] = __a;
-  return __res;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_promote(unsigned char __a, int __b) {
-  vector unsigned char __res = (vector unsigned char)(0);
-  __res[__b] = __a;
-  return __res;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_promote(short __a, int __b) {
-  vector short __res = (vector short)(0);
-  __res[__b] = __a;
-  return __res;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_promote(unsigned short __a, int __b) {
-  vector unsigned short __res = (vector unsigned short)(0);
-  __res[__b] = __a;
-  return __res;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_promote(int __a, int __b) {
-  vector int __res = (vector int)(0);
-  __res[__b] = __a;
-  return __res;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_promote(unsigned int __a,
-                                                               int __b) {
-  vector unsigned int __res = (vector unsigned int)(0);
-  __res[__b] = __a;
-  return __res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_promote(float __a, int __b) {
-  vector float __res = (vector float)(0);
-  __res[__b] = __a;
-  return __res;
-}
-
-/* vec_splats */
-
-static __inline__ vector signed char __ATTRS_o_ai vec_splats(signed char __a) {
-  return (vector signed char)(__a);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_splats(unsigned char __a) {
-  return (vector unsigned char)(__a);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_splats(short __a) {
-  return (vector short)(__a);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_splats(unsigned short __a) {
-  return (vector unsigned short)(__a);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_splats(int __a) {
-  return (vector int)(__a);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_splats(unsigned int __a) {
-  return (vector unsigned int)(__a);
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_splats(signed long long __a) {
-  return (vector signed long long)(__a);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_splats(unsigned long long __a) {
-  return (vector unsigned long long)(__a);
-}
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_splats(signed __int128 __a) {
-  return (vector signed __int128)(__a);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_splats(unsigned __int128 __a) {
-  return (vector unsigned __int128)(__a);
-}
-
-#endif
-
-static __inline__ vector double __ATTRS_o_ai vec_splats(double __a) {
-  return (vector double)(__a);
-}
-#endif
-
-static __inline__ vector float __ATTRS_o_ai vec_splats(float __a) {
-  return (vector float)(__a);
-}
-
-/* ----------------------------- predicates --------------------------------- */
-
-/* vec_all_eq */
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT, __a, (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector pixel __a,
-                                              vector pixel __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT, __a, (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
-                                      (vector int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_LT, __a, (vector long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
-                                      (vector long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
-                                      (vector long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool long long __a,
-                                              vector long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
-                                      (vector long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
-                                      (vector long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
-                                      (vector long long)__b);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpeqsp_p(__CR6_LT, __a, __b);
-#else
-  return __builtin_altivec_vcmpeqfp_p(__CR6_LT, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpeqdp_p(__CR6_LT, __a, __b);
-}
-#endif
-
-/* vec_all_ge */
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, (vector signed char)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __b, (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, (vector short)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, (vector int)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__b,
-                                      (vector unsigned int)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __b, (vector unsigned int)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__b,
-                                      (vector unsigned int)__a);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __b, __a);
-}
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, (vector signed long long)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__b,
-                                      (vector unsigned long long)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __b,
-                                      (vector unsigned long long)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__b,
-                                      (vector unsigned long long)__a);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgesp_p(__CR6_LT, __a, __b);
-#else
-  return __builtin_altivec_vcmpgefp_p(__CR6_LT, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpgedp_p(__CR6_LT, __a, __b);
-}
-#endif
-
-/* vec_all_gt */
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT, __a, (vector signed char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, __a, (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT, __a, (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT, __a, (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __a, (vector unsigned int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__a,
-                                      (vector unsigned int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__a,
-                                      (vector unsigned int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __a, __b);
-}
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, __a,
-                                      (vector unsigned long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__a,
-                                      (vector unsigned long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__a,
-                                      (vector unsigned long long)__b);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgtsp_p(__CR6_LT, __a, __b);
-#else
-  return __builtin_altivec_vcmpgtfp_p(__CR6_LT, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpgtdp_p(__CR6_LT, __a, __b);
-}
-#endif
-
-/* vec_all_in */
-
-static __inline__ int __attribute__((__always_inline__))
-vec_all_in(vector float __a, vector float __b) {
-  return __builtin_altivec_vcmpbfp_p(__CR6_EQ, __a, __b);
-}
-
-/* vec_all_le */
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, __a, (vector signed char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __a, (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, __a, (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, __a, (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __a, (vector unsigned int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__a,
-                                      (vector unsigned int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__a,
-                                      (vector unsigned int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_all_le(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __a,
-                                      (vector unsigned long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__a,
-                                      (vector unsigned long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__a,
-                                      (vector unsigned long long)__b);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgesp_p(__CR6_LT, __b, __a);
-#else
-  return __builtin_altivec_vcmpgefp_p(__CR6_LT, __b, __a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_le(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpgedp_p(__CR6_LT, __b, __a);
-}
-#endif
-
-/* vec_all_lt */
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT, (vector signed char)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, __b, (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT, (vector short)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT, (vector int)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__b,
-                                      (vector unsigned int)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __b, (vector unsigned int)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__b,
-                                      (vector unsigned int)__a);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT, (vector signed long long)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__b,
-                                      (vector unsigned long long)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, __b,
-                                      (vector unsigned long long)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__b,
-                                      (vector unsigned long long)__a);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgtsp_p(__CR6_LT, __b, __a);
-#else
-  return __builtin_altivec_vcmpgtfp_p(__CR6_LT, __b, __a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpgtdp_p(__CR6_LT, __b, __a);
-}
-#endif
-
-/* vec_all_nan */
-
-static __inline__ int __ATTRS_o_ai vec_all_nan(vector float __a) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ, __a, __a);
-#else
-  return __builtin_altivec_vcmpeqfp_p(__CR6_EQ, __a, __a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_nan(vector double __a) {
-  return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ, __a, __a);
-}
-#endif
-
-/* vec_all_ne */
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ, __a, (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector pixel __a,
-                                              vector pixel __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ, __a, (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
-                                      (vector int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector long long)__a,
-                                      (vector long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ, __a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a,
-                                      (vector signed long long)__b);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ, __a, __b);
-#else
-  return __builtin_altivec_vcmpeqfp_p(__CR6_EQ, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ, __a, __b);
-}
-#endif
-
-/* vec_all_nge */
-
-static __inline__ int __ATTRS_o_ai vec_all_nge(vector float __a,
-                                               vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgesp_p(__CR6_EQ, __a, __b);
-#else
-  return __builtin_altivec_vcmpgefp_p(__CR6_EQ, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_nge(vector double __a,
-                                               vector double __b) {
-  return __builtin_vsx_xvcmpgedp_p(__CR6_EQ, __a, __b);
-}
-#endif
-
-/* vec_all_ngt */
-
-static __inline__ int __ATTRS_o_ai vec_all_ngt(vector float __a,
-                                               vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ, __a, __b);
-#else
-  return __builtin_altivec_vcmpgtfp_p(__CR6_EQ, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_ngt(vector double __a,
-                                               vector double __b) {
-  return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ, __a, __b);
-}
-#endif
-
-/* vec_all_nle */
-
-static __inline__ int __attribute__((__always_inline__))
-vec_all_nle(vector float __a, vector float __b) {
-  return __builtin_altivec_vcmpgefp_p(__CR6_EQ, __b, __a);
-}
-
-/* vec_all_nlt */
-
-static __inline__ int __attribute__((__always_inline__))
-vec_all_nlt(vector float __a, vector float __b) {
-  return __builtin_altivec_vcmpgtfp_p(__CR6_EQ, __b, __a);
-}
-
-/* vec_all_numeric */
-
-static __inline__ int __attribute__((__always_inline__))
-vec_all_numeric(vector float __a) {
-  return __builtin_altivec_vcmpeqfp_p(__CR6_LT, __a, __a);
-}
-
-/* vec_any_eq */
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, __a, (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector pixel __a,
-                                              vector pixel __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, __a, (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, (vector long long)__a,
-                                      (vector long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, __a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(
-      __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpequd_p(
-      __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpequd_p(
-      __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(
-      __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ_REV, __a, __b);
-#else
-  return __builtin_altivec_vcmpeqfp_p(__CR6_EQ_REV, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ_REV, __a, __b);
-}
-#endif
-
-/* vec_any_ge */
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, (vector signed char)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, (vector short)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, (vector int)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__b,
-                                      (vector unsigned int)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __b,
-                                      (vector unsigned int)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__b,
-                                      (vector unsigned int)__a);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV,
-                                      (vector signed long long)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
-                                      (vector unsigned long long)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
-                                      (vector unsigned long long)__b,
-                                      (vector unsigned long long)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __b,
-                                      (vector unsigned long long)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
-                                      (vector unsigned long long)__b,
-                                      (vector unsigned long long)__a);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgesp_p(__CR6_EQ_REV, __a, __b);
-#else
-  return __builtin_altivec_vcmpgefp_p(__CR6_EQ_REV, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpgedp_p(__CR6_EQ_REV, __a, __b);
-}
-#endif
-
-/* vec_any_gt */
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, __a,
-                                      (vector signed char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, __a, (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, __a, (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __a,
-                                      (vector unsigned int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__a,
-                                      (vector unsigned int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__a,
-                                      (vector unsigned int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __a,
-                                      (vector unsigned long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
-                                      (vector unsigned long long)__a,
-                                      (vector unsigned long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
-                                      (vector unsigned long long)__a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
-                                      (vector unsigned long long)__a,
-                                      (vector unsigned long long)__b);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ_REV, __a, __b);
-#else
-  return __builtin_altivec_vcmpgtfp_p(__CR6_EQ_REV, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ_REV, __a, __b);
-}
-#endif
-
-/* vec_any_le */
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, __a,
-                                      (vector signed char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, __a, (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, __a, (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __a,
-                                      (vector unsigned int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__a,
-                                      (vector unsigned int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__a,
-                                      (vector unsigned int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_any_le(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __a,
-                                      (vector unsigned long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
-                                      (vector unsigned long long)__a,
-                                      (vector unsigned long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
-                                      (vector unsigned long long)__a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
-                                      (vector unsigned long long)__a,
-                                      (vector unsigned long long)__b);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgesp_p(__CR6_EQ_REV, __b, __a);
-#else
-  return __builtin_altivec_vcmpgefp_p(__CR6_EQ_REV, __b, __a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_le(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpgedp_p(__CR6_EQ_REV, __b, __a);
-}
-#endif
-
-/* vec_any_lt */
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, (vector signed char)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, (vector short)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, (vector int)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__b,
-                                      (vector unsigned int)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __b,
-                                      (vector unsigned int)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__b,
-                                      (vector unsigned int)__a);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV,
-                                      (vector signed long long)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
-                                      (vector unsigned long long)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
-                                      (vector unsigned long long)__b,
-                                      (vector unsigned long long)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __b,
-                                      (vector unsigned long long)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
-                                      (vector unsigned long long)__b,
-                                      (vector unsigned long long)__a);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ_REV, __b, __a);
-#else
-  return __builtin_altivec_vcmpgtfp_p(__CR6_EQ_REV, __b, __a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ_REV, __b, __a);
-}
-#endif
-
-/* vec_any_nan */
-
-static __inline__ int __attribute__((__always_inline__))
-vec_any_nan(vector float __a) {
-  return __builtin_altivec_vcmpeqfp_p(__CR6_LT_REV, __a, __a);
-}
-
-/* vec_any_ne */
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, __a, (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector pixel __a,
-                                              vector pixel __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, __a, (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, (vector long long)__a,
-                                      (vector long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, __a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(
-      __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpequd_p(
-      __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpequd_p(
-      __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(
-      __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpeqsp_p(__CR6_LT_REV, __a, __b);
-#else
-  return __builtin_altivec_vcmpeqfp_p(__CR6_LT_REV, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpeqdp_p(__CR6_LT_REV, __a, __b);
-}
-#endif
-
-/* vec_any_nge */
-
-static __inline__ int __attribute__((__always_inline__))
-vec_any_nge(vector float __a, vector float __b) {
-  return __builtin_altivec_vcmpgefp_p(__CR6_LT_REV, __a, __b);
-}
-
-/* vec_any_ngt */
-
-static __inline__ int __attribute__((__always_inline__))
-vec_any_ngt(vector float __a, vector float __b) {
-  return __builtin_altivec_vcmpgtfp_p(__CR6_LT_REV, __a, __b);
-}
-
-/* vec_any_nle */
-
-static __inline__ int __attribute__((__always_inline__))
-vec_any_nle(vector float __a, vector float __b) {
-  return __builtin_altivec_vcmpgefp_p(__CR6_LT_REV, __b, __a);
-}
-
-/* vec_any_nlt */
-
-static __inline__ int __attribute__((__always_inline__))
-vec_any_nlt(vector float __a, vector float __b) {
-  return __builtin_altivec_vcmpgtfp_p(__CR6_LT_REV, __b, __a);
-}
-
-/* vec_any_numeric */
-
-static __inline__ int __attribute__((__always_inline__))
-vec_any_numeric(vector float __a) {
-  return __builtin_altivec_vcmpeqfp_p(__CR6_EQ_REV, __a, __a);
-}
-
-/* vec_any_out */
-
-static __inline__ int __attribute__((__always_inline__))
-vec_any_out(vector float __a, vector float __b) {
-  return __builtin_altivec_vcmpbfp_p(__CR6_EQ_REV, __a, __b);
-}
-
-/* Power 8 Crypto functions
-Note: We diverge from the current GCC implementation with regard
-to cryptography and related functions as follows:
-- Only the SHA and AES instructions and builtins are disabled by -mno-crypto
-- The remaining ones are only available on Power8 and up, so they
-  require -mpower8-vector
-The justification for this is that export requirements dictate that
-Category:Vector.Crypto be optional (i.e. compliant hardware may not provide
-support); as a result, we need to be able to turn off support for the SHA
-and AES builtins. The remaining ones (currently controlled by -mcrypto for
-GCC) still need to be provided on compliant hardware even if Vector.Crypto
-is not provided.
-*/
-#ifdef __CRYPTO__
-#define vec_sbox_be __builtin_altivec_crypto_vsbox
-#define vec_cipher_be __builtin_altivec_crypto_vcipher
-#define vec_cipherlast_be __builtin_altivec_crypto_vcipherlast
-#define vec_ncipher_be __builtin_altivec_crypto_vncipher
-#define vec_ncipherlast_be __builtin_altivec_crypto_vncipherlast
-
-static __inline__ vector unsigned long long __attribute__((__always_inline__))
-__builtin_crypto_vsbox(vector unsigned long long __a) {
-  return __builtin_altivec_crypto_vsbox(__a);
-}
-
-static __inline__ vector unsigned long long __attribute__((__always_inline__))
-__builtin_crypto_vcipher(vector unsigned long long __a,
-                         vector unsigned long long __b) {
-  return __builtin_altivec_crypto_vcipher(__a, __b);
-}
-
-static __inline__ vector unsigned long long __attribute__((__always_inline__))
-__builtin_crypto_vcipherlast(vector unsigned long long __a,
-                             vector unsigned long long __b) {
-  return __builtin_altivec_crypto_vcipherlast(__a, __b);
-}
-
-static __inline__ vector unsigned long long __attribute__((__always_inline__))
-__builtin_crypto_vncipher(vector unsigned long long __a,
-                          vector unsigned long long __b) {
-  return __builtin_altivec_crypto_vncipher(__a, __b);
-}
-
-static __inline__ vector unsigned long long __attribute__((__always_inline__))
-__builtin_crypto_vncipherlast(vector unsigned long long __a,
-                              vector unsigned long long __b) {
-  return __builtin_altivec_crypto_vncipherlast(__a, __b);
-}
-
-#define __builtin_crypto_vshasigmad __builtin_altivec_crypto_vshasigmad
-#define __builtin_crypto_vshasigmaw __builtin_altivec_crypto_vshasigmaw
-
-#define vec_shasigma_be(X, Y, Z)                                               \
-  _Generic((X), vector unsigned int                                            \
-           : __builtin_crypto_vshasigmaw, vector unsigned long long            \
-           : __builtin_crypto_vshasigmad)((X), (Y), (Z))
-#endif
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector bool char __ATTRS_o_ai
-vec_permxor(vector bool char __a, vector bool char __b,
-            vector bool char __c) {
-  return __builtin_altivec_crypto_vpermxor(__a, __b, __c);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_permxor(vector signed char __a, vector signed char __b,
-            vector signed char __c) {
-  return __builtin_altivec_crypto_vpermxor(__a, __b, __c);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_permxor(vector unsigned char __a, vector unsigned char __b,
-            vector unsigned char __c) {
-  return __builtin_altivec_crypto_vpermxor(__a, __b, __c);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-__builtin_crypto_vpermxor(vector unsigned char __a, vector unsigned char __b,
-                          vector unsigned char __c) {
-  return __builtin_altivec_crypto_vpermxor(__a, __b, __c);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-__builtin_crypto_vpermxor(vector unsigned short __a, vector unsigned short __b,
-                          vector unsigned short __c) {
-  return (vector unsigned short)__builtin_altivec_crypto_vpermxor(
-      (vector unsigned char)__a, (vector unsigned char)__b,
-      (vector unsigned char)__c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai __builtin_crypto_vpermxor(
-    vector unsigned int __a, vector unsigned int __b, vector unsigned int __c) {
-  return (vector unsigned int)__builtin_altivec_crypto_vpermxor(
-      (vector unsigned char)__a, (vector unsigned char)__b,
-      (vector unsigned char)__c);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-__builtin_crypto_vpermxor(vector unsigned long long __a,
-                          vector unsigned long long __b,
-                          vector unsigned long long __c) {
-  return (vector unsigned long long)__builtin_altivec_crypto_vpermxor(
-      (vector unsigned char)__a, (vector unsigned char)__b,
-      (vector unsigned char)__c);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-__builtin_crypto_vpmsumb(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_crypto_vpmsumb(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-__builtin_crypto_vpmsumb(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_crypto_vpmsumh(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-__builtin_crypto_vpmsumb(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_crypto_vpmsumw(__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-__builtin_crypto_vpmsumb(vector unsigned long long __a,
-                         vector unsigned long long __b) {
-  return __builtin_altivec_crypto_vpmsumd(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vgbbd(vector signed char __a) {
-  return __builtin_altivec_vgbbd((vector unsigned char)__a);
-}
-
-#define vec_pmsum_be __builtin_crypto_vpmsumb
-#define vec_gb __builtin_altivec_vgbbd
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vgbbd(vector unsigned char __a) {
-  return __builtin_altivec_vgbbd(__a);
-}
-
-static __inline__ vector long long __ATTRS_o_ai
-vec_vbpermq(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vbpermq((vector unsigned char)__a,
-                                   (vector unsigned char)__b);
-}
-
-static __inline__ vector long long __ATTRS_o_ai
-vec_vbpermq(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vbpermq(__a, __b);
-}
-
-#ifdef __powerpc64__
-static __inline__ vector unsigned long long __attribute__((__always_inline__))
-vec_bperm(vector unsigned __int128 __a, vector unsigned char __b) {
-  return __builtin_altivec_vbpermq((vector unsigned char)__a,
-                                   (vector unsigned char)__b);
-}
-#endif
-#endif
-
-
-/* vec_reve */
-
-static inline __ATTRS_o_ai vector bool char vec_reve(vector bool char __a) {
-  return __builtin_shufflevector(__a, __a, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6,
-                                 5, 4, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector signed char vec_reve(vector signed char __a) {
-  return __builtin_shufflevector(__a, __a, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6,
-                                 5, 4, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_reve(vector unsigned char __a) {
-  return __builtin_shufflevector(__a, __a, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6,
-                                 5, 4, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector bool int vec_reve(vector bool int __a) {
-  return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector signed int vec_reve(vector signed int __a) {
-  return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_reve(vector unsigned int __a) {
-  return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector bool short vec_reve(vector bool short __a) {
-  return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_reve(vector signed short __a) {
-  return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_reve(vector unsigned short __a) {
-  return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector float vec_reve(vector float __a) {
-  return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
-}
-
-#ifdef __VSX__
-static inline __ATTRS_o_ai vector bool long long
-vec_reve(vector bool long long __a) {
-  return __builtin_shufflevector(__a, __a, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_reve(vector signed long long __a) {
-  return __builtin_shufflevector(__a, __a, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_reve(vector unsigned long long __a) {
-  return __builtin_shufflevector(__a, __a, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector double vec_reve(vector double __a) {
-  return __builtin_shufflevector(__a, __a, 1, 0);
-}
-#endif
-
-/* vec_revb */
-static __inline__ vector bool char __ATTRS_o_ai
-vec_revb(vector bool char __a) {
-  return __a;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_revb(vector signed char __a) {
-  return __a;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_revb(vector unsigned char __a) {
-  return __a;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_revb(vector bool short __a) {
-  vector unsigned char __indices =
-      { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_revb(vector signed short __a) {
-  vector unsigned char __indices =
-      { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_revb(vector unsigned short __a) {
-  vector unsigned char __indices =
-     { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_revb(vector bool int __a) {
-  vector unsigned char __indices =
-      { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_revb(vector signed int __a) {
-  vector unsigned char __indices =
-      { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_revb(vector unsigned int __a) {
-  vector unsigned char __indices =
-      { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_revb(vector float __a) {
- vector unsigned char __indices =
-      { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 };
- return vec_perm(__a, __a, __indices);
-}
-
-#ifdef __VSX__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_revb(vector bool long long __a) {
-  vector unsigned char __indices =
-      { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_revb(vector signed long long __a) {
-  vector unsigned char __indices =
-      { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_revb(vector unsigned long long __a) {
-  vector unsigned char __indices =
-      { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_revb(vector double __a) {
-  vector unsigned char __indices =
-      { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 };
-  return vec_perm(__a, __a, __indices);
-}
-#endif /* End __VSX__ */
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_revb(vector signed __int128 __a) {
-  vector unsigned char __indices =
-      { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 };
-  return (vector signed __int128)vec_perm((vector signed int)__a,
-                                          (vector signed int)__a,
-                                           __indices);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_revb(vector unsigned __int128 __a) {
-  vector unsigned char __indices =
-      { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 };
-  return (vector unsigned __int128)vec_perm((vector signed int)__a,
-                                            (vector signed int)__a,
-                                             __indices);
-}
-#endif /* END __POWER8_VECTOR__ && __powerpc64__ */
-
-/* vec_xl */
-
-typedef vector signed char unaligned_vec_schar __attribute__((aligned(1)));
-typedef vector unsigned char unaligned_vec_uchar __attribute__((aligned(1)));
-typedef vector signed short unaligned_vec_sshort __attribute__((aligned(1)));
-typedef vector unsigned short unaligned_vec_ushort __attribute__((aligned(1)));
-typedef vector signed int unaligned_vec_sint __attribute__((aligned(1)));
-typedef vector unsigned int unaligned_vec_uint __attribute__((aligned(1)));
-typedef vector float unaligned_vec_float __attribute__((aligned(1)));
-
-static inline __ATTRS_o_ai vector signed char vec_xl(signed long long __offset,
-                                                     signed char *__ptr) {
-  return *(unaligned_vec_schar *)(__ptr + __offset);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_xl(signed long long __offset, unsigned char *__ptr) {
-  return *(unaligned_vec_uchar*)(__ptr + __offset);
-}
-
-static inline __ATTRS_o_ai vector signed short vec_xl(signed long long __offset,
-                                                      signed short *__ptr) {
-  return *(unaligned_vec_sshort *)(__ptr + __offset);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_xl(signed long long __offset, unsigned short *__ptr) {
-  return *(unaligned_vec_ushort *)(__ptr + __offset);
-}
-
-static inline __ATTRS_o_ai vector signed int vec_xl(signed long long __offset,
-                                                    signed int *__ptr) {
-  return *(unaligned_vec_sint *)(__ptr + __offset);
-}
-
-static inline __ATTRS_o_ai vector unsigned int vec_xl(signed long long __offset,
-                                                      unsigned int *__ptr) {
-  return *(unaligned_vec_uint *)(__ptr + __offset);
-}
-
-static inline __ATTRS_o_ai vector float vec_xl(signed long long __offset,
-                                               float *__ptr) {
-  return *(unaligned_vec_float *)(__ptr + __offset);
-}
-
-#ifdef __VSX__
-typedef vector signed long long unaligned_vec_sll __attribute__((aligned(1)));
-typedef vector unsigned long long unaligned_vec_ull __attribute__((aligned(1)));
-typedef vector double unaligned_vec_double __attribute__((aligned(1)));
-
-static inline __ATTRS_o_ai vector signed long long
-vec_xl(signed long long __offset, signed long long *__ptr) {
-  return *(unaligned_vec_sll *)(__ptr + __offset);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_xl(signed long long __offset, unsigned long long *__ptr) {
-  return *(unaligned_vec_ull *)(__ptr + __offset);
-}
-
-static inline __ATTRS_o_ai vector double vec_xl(signed long long __offset,
-                                                double *__ptr) {
-  return *(unaligned_vec_double *)(__ptr + __offset);
-}
-#endif
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-typedef vector signed __int128 unaligned_vec_si128 __attribute__((aligned(1)));
-typedef vector unsigned __int128 unaligned_vec_ui128
-    __attribute__((aligned(1)));
-static inline __ATTRS_o_ai vector signed __int128
-vec_xl(signed long long __offset, signed __int128 *__ptr) {
-  return *(unaligned_vec_si128 *)(__ptr + __offset);
-}
-
-static inline __ATTRS_o_ai vector unsigned __int128
-vec_xl(signed long long __offset, unsigned __int128 *__ptr) {
-  return *(unaligned_vec_ui128 *)(__ptr + __offset);
-}
-#endif
-
-/* vec_xl_be */
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector signed char __ATTRS_o_ai
-vec_xl_be(signed long long __offset, signed char *__ptr) {
-  vector signed char __vec = __builtin_vsx_lxvd2x_be(__offset, __ptr);
-  return __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
-                                 13, 12, 11, 10, 9, 8);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xl_be(signed long long __offset, unsigned char *__ptr) {
-  vector unsigned char __vec = __builtin_vsx_lxvd2x_be(__offset, __ptr);
-  return __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
-                                 13, 12, 11, 10, 9, 8);
-}
-
-static __inline__ vector signed short  __ATTRS_o_ai
-vec_xl_be(signed long long __offset, signed short *__ptr) {
-  vector signed short __vec = __builtin_vsx_lxvd2x_be(__offset, __ptr);
-  return __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_xl_be(signed long long __offset, unsigned short *__ptr) {
-  vector unsigned short __vec = __builtin_vsx_lxvd2x_be(__offset, __ptr);
-  return __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, signed int *__ptr) {
-  return (vector signed int)__builtin_vsx_lxvw4x_be(__offset, __ptr);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, unsigned int *__ptr) {
-  return (vector unsigned int)__builtin_vsx_lxvw4x_be(__offset, __ptr);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, float *__ptr) {
-  return (vector float)__builtin_vsx_lxvw4x_be(__offset, __ptr);
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, signed long long *__ptr) {
-  return (vector signed long long)__builtin_vsx_lxvd2x_be(__offset, __ptr);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, unsigned long long *__ptr) {
-  return (vector unsigned long long)__builtin_vsx_lxvd2x_be(__offset, __ptr);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, double *__ptr) {
-  return (vector double)__builtin_vsx_lxvd2x_be(__offset, __ptr);
-}
-#endif
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, signed __int128 *__ptr) {
-  return vec_xl(__offset, __ptr);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, unsigned __int128 *__ptr) {
-  return vec_xl(__offset, __ptr);
-}
-#endif
-#else
-  #define vec_xl_be vec_xl
-#endif
-
-/* vec_xst */
-
-static inline __ATTRS_o_ai void vec_xst(vector signed char __vec,
-                                        signed long long __offset,
-                                        signed char *__ptr) {
-  *(unaligned_vec_schar *)(__ptr + __offset) = __vec;
-}
-
-static inline __ATTRS_o_ai void vec_xst(vector unsigned char __vec,
-                                        signed long long __offset,
-                                        unsigned char *__ptr) {
-  *(unaligned_vec_uchar *)(__ptr + __offset) = __vec;
-}
-
-static inline __ATTRS_o_ai void vec_xst(vector signed short __vec,
-                                        signed long long __offset,
-                                        signed short *__ptr) {
-  *(unaligned_vec_sshort *)(__ptr + __offset) = __vec;
-}
-
-static inline __ATTRS_o_ai void vec_xst(vector unsigned short __vec,
-                                        signed long long __offset,
-                                        unsigned short *__ptr) {
-  *(unaligned_vec_ushort *)(__ptr + __offset) = __vec;
-}
-
-static inline __ATTRS_o_ai void vec_xst(vector signed int __vec,
-                                        signed long long __offset,
-                                        signed int *__ptr) {
-  *(unaligned_vec_sint *)(__ptr + __offset) = __vec;
-}
-
-static inline __ATTRS_o_ai void vec_xst(vector unsigned int __vec,
-                                        signed long long __offset,
-                                        unsigned int *__ptr) {
-  *(unaligned_vec_uint *)(__ptr + __offset) = __vec;
-}
-
-static inline __ATTRS_o_ai void vec_xst(vector float __vec,
-                                        signed long long __offset,
-                                        float *__ptr) {
-  *(unaligned_vec_float *)(__ptr + __offset) = __vec;
-}
-
-#ifdef __VSX__
-static inline __ATTRS_o_ai void vec_xst(vector signed long long __vec,
-                                        signed long long __offset,
-                                        signed long long *__ptr) {
-  *(unaligned_vec_sll *)(__ptr + __offset) = __vec;
-}
-
-static inline __ATTRS_o_ai void vec_xst(vector unsigned long long __vec,
-                                        signed long long __offset,
-                                        unsigned long long *__ptr) {
-  *(unaligned_vec_ull *)(__ptr + __offset) = __vec;
-}
-
-static inline __ATTRS_o_ai void vec_xst(vector double __vec,
-                                        signed long long __offset,
-                                        double *__ptr) {
-  *(unaligned_vec_double *)(__ptr + __offset) = __vec;
-}
-#endif
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static inline __ATTRS_o_ai void vec_xst(vector signed __int128 __vec,
-                                        signed long long __offset,
-                                        signed __int128 *__ptr) {
-  *(unaligned_vec_si128 *)(__ptr + __offset) = __vec;
-}
-
-static inline __ATTRS_o_ai void vec_xst(vector unsigned __int128 __vec,
-                                        signed long long __offset,
-                                        unsigned __int128 *__ptr) {
-  *(unaligned_vec_ui128 *)(__ptr + __offset) = __vec;
-}
-#endif
-
-/* vec_xst_be */
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed char __vec,
-                                               signed long long  __offset,
-                                               signed char *__ptr) {
-  vector signed char __tmp =
-     __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
-                             13, 12, 11, 10, 9, 8);
-  __builtin_vsx_stxvd2x_be(__tmp, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned char __vec,
-                                               signed long long  __offset,
-                                               unsigned char *__ptr) {
-  vector unsigned char __tmp =
-     __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
-                             13, 12, 11, 10, 9, 8);
-  __builtin_vsx_stxvd2x_be(__tmp, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed short __vec,
-                                               signed long long  __offset,
-                                               signed short *__ptr) {
-  vector signed short __tmp =
-     __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
-  __builtin_vsx_stxvd2x_be(__tmp, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned short __vec,
-                                               signed long long  __offset,
-                                               unsigned short *__ptr) {
-  vector unsigned short __tmp =
-     __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
-  __builtin_vsx_stxvd2x_be(__tmp, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed int __vec,
-                                               signed long long  __offset,
-                                               signed int *__ptr) {
-  __builtin_vsx_stxvw4x_be(__vec, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned int __vec,
-                                               signed long long  __offset,
-                                               unsigned int *__ptr) {
-  __builtin_vsx_stxvw4x_be(__vec, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector float __vec,
-                                               signed long long  __offset,
-                                               float *__ptr) {
-  __builtin_vsx_stxvw4x_be(__vec, __offset, __ptr);
-}
-
-#ifdef __VSX__
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed long long __vec,
-                                               signed long long  __offset,
-                                               signed long long *__ptr) {
-  __builtin_vsx_stxvd2x_be(__vec, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned long long __vec,
-                                               signed long long  __offset,
-                                               unsigned long long *__ptr) {
-  __builtin_vsx_stxvd2x_be(__vec, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector double __vec,
-                                               signed long long  __offset,
-                                               double *__ptr) {
-  __builtin_vsx_stxvd2x_be(__vec, __offset, __ptr);
-}
-#endif
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed __int128 __vec,
-                                               signed long long  __offset,
-                                               signed __int128 *__ptr) {
-  vec_xst(__vec, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned __int128 __vec,
-                                               signed long long  __offset,
-                                               unsigned __int128 *__ptr) {
-  vec_xst(__vec, __offset, __ptr);
-}
-#endif
-#else
-  #define vec_xst_be vec_xst
-#endif
-
-#ifdef __POWER9_VECTOR__
-#define vec_test_data_class(__a, __b)                                      \
-        _Generic((__a),                                                    \
-           vector float:                                                   \
-             (vector bool int)__builtin_vsx_xvtstdcsp((__a), (__b)),       \
-           vector double:                                                  \
-             (vector bool long long)__builtin_vsx_xvtstdcdp((__a), (__b))  \
-        )
-
-#endif /* #ifdef __POWER9_VECTOR__ */
-
-static vector float __ATTRS_o_ai vec_neg(vector float __a) {
-  return -__a;
-}
-
-#ifdef __VSX__
-static vector double __ATTRS_o_ai vec_neg(vector double __a) {
-  return -__a;
-}
-
-#endif
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static vector long long __ATTRS_o_ai vec_neg(vector long long __a) {
-  return -__a;
-}
-#endif
-
-static vector signed int __ATTRS_o_ai vec_neg(vector signed int __a) {
-  return -__a;
-}
-
-static vector signed short __ATTRS_o_ai vec_neg(vector signed short __a) {
-  return -__a;
-}
-
-static vector signed char __ATTRS_o_ai vec_neg(vector signed char __a) {
-  return -__a;
-}
-
-static vector float __ATTRS_o_ai vec_nabs(vector float __a) {
-  return - vec_abs(__a);
-}
-
-#ifdef __VSX__
-static vector double __ATTRS_o_ai vec_nabs(vector double __a) {
-  return - vec_abs(__a);
-}
-
-#endif
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static vector long long __ATTRS_o_ai vec_nabs(vector long long __a) {
-  return __builtin_altivec_vminsd(__a, -__a);
-}
-#endif
-
-static vector signed int __ATTRS_o_ai vec_nabs(vector signed int __a) {
-  return __builtin_altivec_vminsw(__a, -__a);
-}
-
-static vector signed short __ATTRS_o_ai vec_nabs(vector signed short __a) {
-  return __builtin_altivec_vminsh(__a, -__a);
-}
-
-static vector signed char __ATTRS_o_ai vec_nabs(vector signed char __a) {
-  return __builtin_altivec_vminsb(__a, -__a);
-}
-#undef __ATTRS_o_ai
-
-#endif /* __ALTIVEC_H */
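As a hedged aside (not part of the header removed above): the sketch below shows one way the vec_xl, vec_revb, and vec_xst overloads defined earlier in this hunk are typically combined to load from an unaligned buffer, byte-swap each 32-bit element, and store the result back. The function name bswap32_block, the buffer argument, and the assumption of compiling with AltiVec enabled (e.g. -maltivec) are illustrative only.

/* Illustrative usage only -- not part of altivec.h. */
#include <altivec.h>

static void bswap32_block(unsigned int *buf) {
  /* vec_xl tolerates unaligned pointers via the aligned(1) typedefs above. */
  vector unsigned int v = vec_xl(0, buf);
  /* Reverse the bytes within each 32-bit element (big <-> little endian). */
  v = vec_revb(v);
  /* Store the four converted elements back to the same location. */
  vec_xst(v, 0, buf);
}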
diff --git a/linux-x86/lib64/clang/10.0.1/include/arm_acle.h b/linux-x86/lib64/clang/10.0.1/include/arm_acle.h
deleted file mode 100644
index 0510e6f..0000000
--- a/linux-x86/lib64/clang/10.0.1/include/arm_acle.h
+++ /dev/null
@@ -1,652 +0,0 @@
-/*===---- arm_acle.h - ARM Non-Neon intrinsics -----------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __ARM_ACLE_H
-#define __ARM_ACLE_H
-
-#ifndef __ARM_ACLE
-#error "ACLE intrinsics support not enabled."
-#endif
-
-#include <stdint.h>
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-/* 8 SYNCHRONIZATION, BARRIER AND HINT INTRINSICS */
-/* 8.3 Memory barriers */
-#if !defined(_MSC_VER)
-#define __dmb(i) __builtin_arm_dmb(i)
-#define __dsb(i) __builtin_arm_dsb(i)
-#define __isb(i) __builtin_arm_isb(i)
-#endif
-
-/* 8.4 Hints */
-
-#if !defined(_MSC_VER)
-static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfi(void) {
-  __builtin_arm_wfi();
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfe(void) {
-  __builtin_arm_wfe();
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sev(void) {
-  __builtin_arm_sev();
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__)) __sevl(void) {
-  __builtin_arm_sevl();
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__)) __yield(void) {
-  __builtin_arm_yield();
-}
-#endif
-
-#if __ARM_32BIT_STATE
-#define __dbg(t) __builtin_arm_dbg(t)
-#endif
-
-/* 8.5 Swap */
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__swp(uint32_t __x, volatile uint32_t *__p) {
-  uint32_t v;
-  do
-    v = __builtin_arm_ldrex(__p);
-  while (__builtin_arm_strex(__x, __p));
-  return v;
-}
-
-/* 8.6 Memory prefetch intrinsics */
-/* 8.6.1 Data prefetch */
-#define __pld(addr) __pldx(0, 0, 0, addr)
-
-#if __ARM_32BIT_STATE
-#define __pldx(access_kind, cache_level, retention_policy, addr) \
-  __builtin_arm_prefetch(addr, access_kind, 1)
-#else
-#define __pldx(access_kind, cache_level, retention_policy, addr) \
-  __builtin_arm_prefetch(addr, access_kind, cache_level, retention_policy, 1)
-#endif
-
-/* 8.6.2 Instruction prefetch */
-#define __pli(addr) __plix(0, 0, addr)
-
-#if __ARM_32BIT_STATE
-#define __plix(cache_level, retention_policy, addr) \
-  __builtin_arm_prefetch(addr, 0, 0)
-#else
-#define __plix(cache_level, retention_policy, addr) \
-  __builtin_arm_prefetch(addr, 0, cache_level, retention_policy, 0)
-#endif
-
-/* 8.7 NOP */
-static __inline__ void __attribute__((__always_inline__, __nodebug__)) __nop(void) {
-  __builtin_arm_nop();
-}
-
-/* 9 DATA-PROCESSING INTRINSICS */
-/* 9.2 Miscellaneous data-processing intrinsics */
-/* ROR */
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__ror(uint32_t __x, uint32_t __y) {
-  __y %= 32;
-  if (__y == 0)
-    return __x;
-  return (__x >> __y) | (__x << (32 - __y));
-}
-
-static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
-__rorll(uint64_t __x, uint32_t __y) {
-  __y %= 64;
-  if (__y == 0)
-    return __x;
-  return (__x >> __y) | (__x << (64 - __y));
-}
-
-static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
-__rorl(unsigned long __x, uint32_t __y) {
-#if __SIZEOF_LONG__ == 4
-  return __ror(__x, __y);
-#else
-  return __rorll(__x, __y);
-#endif
-}
-
-
-/* CLZ */
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__clz(uint32_t __t) {
-  return __builtin_clz(__t);
-}
-
-static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
-__clzl(unsigned long __t) {
-  return __builtin_clzl(__t);
-}
-
-static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
-__clzll(uint64_t __t) {
-  return __builtin_clzll(__t);
-}
-
-/* REV */
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__rev(uint32_t __t) {
-  return __builtin_bswap32(__t);
-}
-
-static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
-__revl(unsigned long __t) {
-#if __SIZEOF_LONG__ == 4
-  return __builtin_bswap32(__t);
-#else
-  return __builtin_bswap64(__t);
-#endif
-}
-
-static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
-__revll(uint64_t __t) {
-  return __builtin_bswap64(__t);
-}
-
-/* REV16 */
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__rev16(uint32_t __t) {
-  return __ror(__rev(__t), 16);
-}
-
-static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
-__rev16ll(uint64_t __t) {
-  return (((uint64_t)__rev16(__t >> 32)) << 32) | __rev16(__t);
-}
-
-static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
-__rev16l(unsigned long __t) {
-#if __SIZEOF_LONG__ == 4
-    return __rev16(__t);
-#else
-    return __rev16ll(__t);
-#endif
-}
-
-/* REVSH */
-static __inline__ int16_t __attribute__((__always_inline__, __nodebug__))
-__revsh(int16_t __t) {
-  return __builtin_bswap16(__t);
-}
-
-/* RBIT */
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__rbit(uint32_t __t) {
-  return __builtin_arm_rbit(__t);
-}
-
-static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
-__rbitll(uint64_t __t) {
-#if __ARM_32BIT_STATE
-  return (((uint64_t)__builtin_arm_rbit(__t)) << 32) |
-         __builtin_arm_rbit(__t >> 32);
-#else
-  return __builtin_arm_rbit64(__t);
-#endif
-}
-
-static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
-__rbitl(unsigned long __t) {
-#if __SIZEOF_LONG__ == 4
-  return __rbit(__t);
-#else
-  return __rbitll(__t);
-#endif
-}
-
-/*
- * 9.3 16-bit multiplications
- */
-#if __ARM_FEATURE_DSP
-static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
-__smulbb(int32_t __a, int32_t __b) {
-  return __builtin_arm_smulbb(__a, __b);
-}
-static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
-__smulbt(int32_t __a, int32_t __b) {
-  return __builtin_arm_smulbt(__a, __b);
-}
-static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
-__smultb(int32_t __a, int32_t __b) {
-  return __builtin_arm_smultb(__a, __b);
-}
-static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
-__smultt(int32_t __a, int32_t __b) {
-  return __builtin_arm_smultt(__a, __b);
-}
-static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
-__smulwb(int32_t __a, int32_t __b) {
-  return __builtin_arm_smulwb(__a, __b);
-}
-static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
-__smulwt(int32_t __a, int32_t __b) {
-  return __builtin_arm_smulwt(__a, __b);
-}
-#endif
-
-/*
- * 9.4 Saturating intrinsics
- *
- * FIXME: Change guard to the corresponding __ARM_FEATURE flag when Q flag
- * intrinsics are implemented and the flag is enabled.
- */
-/* 9.4.1 Width-specified saturation intrinsics */
-#if __ARM_FEATURE_SAT
-#define __ssat(x, y) __builtin_arm_ssat(x, y)
-#define __usat(x, y) __builtin_arm_usat(x, y)
-#endif
-
-/* 9.4.2 Saturating addition and subtraction intrinsics */
-#if __ARM_FEATURE_DSP
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__qadd(int32_t __t, int32_t __v) {
-  return __builtin_arm_qadd(__t, __v);
-}
-
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__qsub(int32_t __t, int32_t __v) {
-  return __builtin_arm_qsub(__t, __v);
-}
-
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__qdbl(int32_t __t) {
-  return __builtin_arm_qadd(__t, __t);
-}
-#endif
-
-/* 9.4.3 Accumulating multiplications */
-#if __ARM_FEATURE_DSP
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smlabb(int32_t __a, int32_t __b, int32_t __c) {
-  return __builtin_arm_smlabb(__a, __b, __c);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smlabt(int32_t __a, int32_t __b, int32_t __c) {
-  return __builtin_arm_smlabt(__a, __b, __c);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smlatb(int32_t __a, int32_t __b, int32_t __c) {
-  return __builtin_arm_smlatb(__a, __b, __c);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smlatt(int32_t __a, int32_t __b, int32_t __c) {
-  return __builtin_arm_smlatt(__a, __b, __c);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smlawb(int32_t __a, int32_t __b, int32_t __c) {
-  return __builtin_arm_smlawb(__a, __b, __c);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smlawt(int32_t __a, int32_t __b, int32_t __c) {
-  return __builtin_arm_smlawt(__a, __b, __c);
-}
-#endif
-
-
-/* 9.5.4 Parallel 16-bit saturation */
-#if __ARM_FEATURE_SIMD32
-#define __ssat16(x, y) __builtin_arm_ssat16(x, y)
-#define __usat16(x, y) __builtin_arm_usat16(x, y)
-#endif
-
-/* 9.5.5 Packing and unpacking */
-#if __ARM_FEATURE_SIMD32
-typedef int32_t int8x4_t;
-typedef int32_t int16x2_t;
-typedef uint32_t uint8x4_t;
-typedef uint32_t uint16x2_t;
-
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__sxtab16(int16x2_t __a, int8x4_t __b) {
-  return __builtin_arm_sxtab16(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__sxtb16(int8x4_t __a) {
-  return __builtin_arm_sxtb16(__a);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__uxtab16(int16x2_t __a, int8x4_t __b) {
-  return __builtin_arm_uxtab16(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__uxtb16(int8x4_t __a) {
-  return __builtin_arm_uxtb16(__a);
-}
-#endif
-
-/* 9.5.6 Parallel selection */
-#if __ARM_FEATURE_SIMD32
-static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
-__sel(uint8x4_t __a, uint8x4_t __b) {
-  return __builtin_arm_sel(__a, __b);
-}
-#endif
-
-/* 9.5.7 Parallel 8-bit addition and subtraction */
-#if __ARM_FEATURE_SIMD32
-static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
-__qadd8(int8x4_t __a, int8x4_t __b) {
-  return __builtin_arm_qadd8(__a, __b);
-}
-static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
-__qsub8(int8x4_t __a, int8x4_t __b) {
-  return __builtin_arm_qsub8(__a, __b);
-}
-static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
-__sadd8(int8x4_t __a, int8x4_t __b) {
-  return __builtin_arm_sadd8(__a, __b);
-}
-static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
-__shadd8(int8x4_t __a, int8x4_t __b) {
-  return __builtin_arm_shadd8(__a, __b);
-}
-static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
-__shsub8(int8x4_t __a, int8x4_t __b) {
-  return __builtin_arm_shsub8(__a, __b);
-}
-static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
-__ssub8(int8x4_t __a, int8x4_t __b) {
-  return __builtin_arm_ssub8(__a, __b);
-}
-static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
-__uadd8(uint8x4_t __a, uint8x4_t __b) {
-  return __builtin_arm_uadd8(__a, __b);
-}
-static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
-__uhadd8(uint8x4_t __a, uint8x4_t __b) {
-  return __builtin_arm_uhadd8(__a, __b);
-}
-static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
-__uhsub8(uint8x4_t __a, uint8x4_t __b) {
-  return __builtin_arm_uhsub8(__a, __b);
-}
-static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
-__uqadd8(uint8x4_t __a, uint8x4_t __b) {
-  return __builtin_arm_uqadd8(__a, __b);
-}
-static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
-__uqsub8(uint8x4_t __a, uint8x4_t __b) {
-  return __builtin_arm_uqsub8(__a, __b);
-}
-static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
-__usub8(uint8x4_t __a, uint8x4_t __b) {
-  return __builtin_arm_usub8(__a, __b);
-}
-#endif
-
-/* 9.5.8 Sum of 8-bit absolute differences */
-#if __ARM_FEATURE_SIMD32
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__usad8(uint8x4_t __a, uint8x4_t __b) {
-  return __builtin_arm_usad8(__a, __b);
-}
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__usada8(uint8x4_t __a, uint8x4_t __b, uint32_t __c) {
-  return __builtin_arm_usada8(__a, __b, __c);
-}
-#endif
-
-/* 9.5.9 Parallel 16-bit addition and subtraction */
-#if __ARM_FEATURE_SIMD32
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__qadd16(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_qadd16(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__qasx(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_qasx(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__qsax(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_qsax(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__qsub16(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_qsub16(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__sadd16(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_sadd16(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__sasx(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_sasx(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__shadd16(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_shadd16(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__shasx(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_shasx(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__shsax(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_shsax(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__shsub16(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_shsub16(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__ssax(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_ssax(__a, __b);
-}
-static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
-__ssub16(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_ssub16(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__uadd16(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_uadd16(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__uasx(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_uasx(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__uhadd16(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_uhadd16(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__uhasx(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_uhasx(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__uhsax(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_uhsax(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__uhsub16(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_uhsub16(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__uqadd16(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_uqadd16(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__uqasx(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_uqasx(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__uqsax(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_uqsax(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__uqsub16(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_uqsub16(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__usax(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_usax(__a, __b);
-}
-static __inline__ uint16x2_t __attribute__((__always_inline__, __nodebug__))
-__usub16(uint16x2_t __a, uint16x2_t __b) {
-  return __builtin_arm_usub16(__a, __b);
-}
-#endif
-
-/* 9.5.10 Parallel 16-bit multiplications */
-#if __ARM_FEATURE_SIMD32
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smlad(int16x2_t __a, int16x2_t __b, int32_t __c) {
-  return __builtin_arm_smlad(__a, __b, __c);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smladx(int16x2_t __a, int16x2_t __b, int32_t __c) {
-  return __builtin_arm_smladx(__a, __b, __c);
-}
-static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
-__smlald(int16x2_t __a, int16x2_t __b, int64_t __c) {
-  return __builtin_arm_smlald(__a, __b, __c);
-}
-static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
-__smlaldx(int16x2_t __a, int16x2_t __b, int64_t __c) {
-  return __builtin_arm_smlaldx(__a, __b, __c);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smlsd(int16x2_t __a, int16x2_t __b, int32_t __c) {
-  return __builtin_arm_smlsd(__a, __b, __c);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smlsdx(int16x2_t __a, int16x2_t __b, int32_t __c) {
-  return __builtin_arm_smlsdx(__a, __b, __c);
-}
-static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
-__smlsld(int16x2_t __a, int16x2_t __b, int64_t __c) {
-  return __builtin_arm_smlsld(__a, __b, __c);
-}
-static __inline__ int64_t __attribute__((__always_inline__, __nodebug__))
-__smlsldx(int16x2_t __a, int16x2_t __b, int64_t __c) {
-  return __builtin_arm_smlsldx(__a, __b, __c);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smuad(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_smuad(__a, __b);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smuadx(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_smuadx(__a, __b);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smusd(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_smusd(__a, __b);
-}
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__smusdx(int16x2_t __a, int16x2_t __b) {
-  return __builtin_arm_smusdx(__a, __b);
-}
-#endif
-
-/* 9.7 CRC32 intrinsics */
-#if __ARM_FEATURE_CRC32
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__crc32b(uint32_t __a, uint8_t __b) {
-  return __builtin_arm_crc32b(__a, __b);
-}
-
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__crc32h(uint32_t __a, uint16_t __b) {
-  return __builtin_arm_crc32h(__a, __b);
-}
-
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__crc32w(uint32_t __a, uint32_t __b) {
-  return __builtin_arm_crc32w(__a, __b);
-}
-
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__crc32d(uint32_t __a, uint64_t __b) {
-  return __builtin_arm_crc32d(__a, __b);
-}
-
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__crc32cb(uint32_t __a, uint8_t __b) {
-  return __builtin_arm_crc32cb(__a, __b);
-}
-
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__crc32ch(uint32_t __a, uint16_t __b) {
-  return __builtin_arm_crc32ch(__a, __b);
-}
-
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__crc32cw(uint32_t __a, uint32_t __b) {
-  return __builtin_arm_crc32cw(__a, __b);
-}
-
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
-__crc32cd(uint32_t __a, uint64_t __b) {
-  return __builtin_arm_crc32cd(__a, __b);
-}
-#endif
-
-/* Armv8.3-A Javascript conversion intrinsic */
-#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_JCVT)
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
-__jcvt(double __a) {
-  return __builtin_arm_jcvt(__a);
-}
-#endif
-
-/* 10.1 Special register intrinsics */
-#define __arm_rsr(sysreg) __builtin_arm_rsr(sysreg)
-#define __arm_rsr64(sysreg) __builtin_arm_rsr64(sysreg)
-#define __arm_rsrp(sysreg) __builtin_arm_rsrp(sysreg)
-#define __arm_wsr(sysreg, v) __builtin_arm_wsr(sysreg, v)
-#define __arm_wsr64(sysreg, v) __builtin_arm_wsr64(sysreg, v)
-#define __arm_wsrp(sysreg, v) __builtin_arm_wsrp(sysreg, v)
-
-/* Memory Tagging Extensions (MTE) Intrinsics */
-#if __ARM_FEATURE_MEMORY_TAGGING
-#define __arm_mte_create_random_tag(__ptr, __mask)  __builtin_arm_irg(__ptr, __mask)
-#define __arm_mte_increment_tag(__ptr, __tag_offset)  __builtin_arm_addg(__ptr, __tag_offset)
-#define __arm_mte_exclude_tag(__ptr, __excluded)  __builtin_arm_gmi(__ptr, __excluded)
-#define __arm_mte_get_tag(__ptr) __builtin_arm_ldg(__ptr)
-#define __arm_mte_set_tag(__ptr) __builtin_arm_stg(__ptr)
-#define __arm_mte_ptrdiff(__ptra, __ptrb) __builtin_arm_subp(__ptra, __ptrb)
-#endif
-
-/* Transactional Memory Extension (TME) Intrinsics */
-#if __ARM_FEATURE_TME
-
-#define _TMFAILURE_REASON  0x00007fffu
-#define _TMFAILURE_RTRY    0x00008000u
-#define _TMFAILURE_CNCL    0x00010000u
-#define _TMFAILURE_MEM     0x00020000u
-#define _TMFAILURE_IMP     0x00040000u
-#define _TMFAILURE_ERR     0x00080000u
-#define _TMFAILURE_SIZE    0x00100000u
-#define _TMFAILURE_NEST    0x00200000u
-#define _TMFAILURE_DBG     0x00400000u
-#define _TMFAILURE_INT     0x00800000u
-#define _TMFAILURE_TRIVIAL 0x01000000u
-
-#define __tstart()        __builtin_arm_tstart()
-#define __tcommit()       __builtin_arm_tcommit()
-#define __tcancel(__arg)  __builtin_arm_tcancel(__arg)
-#define __ttest()         __builtin_arm_ttest()
-
-#endif /* __ARM_FEATURE_TME */
-
-#if defined(__cplusplus)
-}
-#endif
-
-#endif /* __ARM_ACLE_H */
diff --git a/linux-x86/lib64/clang/10.0.1/include/arm_fp16.h b/linux-x86/lib64/clang/10.0.1/include/arm_fp16.h
deleted file mode 100644
index 1c04f2a..0000000
--- a/linux-x86/lib64/clang/10.0.1/include/arm_fp16.h
+++ /dev/null
@@ -1,590 +0,0 @@
-/*===---- arm_fp16.h - ARM FP16 intrinsics ---------------------------------===
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __ARM_FP16_H
-#define __ARM_FP16_H
-
-#include <stdint.h>
-
-typedef __fp16 float16_t;
-#define __ai static __inline__ __attribute__((__always_inline__, __nodebug__))
-
-#if defined(__ARM_FEATURE_FP16_SCALAR_ARITHMETIC) && defined(__aarch64__)
-#define vabdh_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vabdh_f16(__s0, __s1); \
-  __ret; \
-})
-#define vabsh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vabsh_f16(__s0); \
-  __ret; \
-})
-#define vaddh_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vaddh_f16(__s0, __s1); \
-  __ret; \
-})
-#define vcageh_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vcageh_f16(__s0, __s1); \
-  __ret; \
-})
-#define vcagth_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vcagth_f16(__s0, __s1); \
-  __ret; \
-})
-#define vcaleh_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vcaleh_f16(__s0, __s1); \
-  __ret; \
-})
-#define vcalth_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vcalth_f16(__s0, __s1); \
-  __ret; \
-})
-#define vceqh_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vceqh_f16(__s0, __s1); \
-  __ret; \
-})
-#define vceqzh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vceqzh_f16(__s0); \
-  __ret; \
-})
-#define vcgeh_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vcgeh_f16(__s0, __s1); \
-  __ret; \
-})
-#define vcgezh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vcgezh_f16(__s0); \
-  __ret; \
-})
-#define vcgth_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vcgth_f16(__s0, __s1); \
-  __ret; \
-})
-#define vcgtzh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vcgtzh_f16(__s0); \
-  __ret; \
-})
-#define vcleh_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vcleh_f16(__s0, __s1); \
-  __ret; \
-})
-#define vclezh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vclezh_f16(__s0); \
-  __ret; \
-})
-#define vclth_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vclth_f16(__s0, __s1); \
-  __ret; \
-})
-#define vcltzh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vcltzh_f16(__s0); \
-  __ret; \
-})
-#define vcvth_n_s16_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vcvth_n_s16_f16(__s0, __p1); \
-  __ret; \
-})
-#define vcvth_n_s32_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vcvth_n_s32_f16(__s0, __p1); \
-  __ret; \
-})
-#define vcvth_n_s64_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vcvth_n_s64_f16(__s0, __p1); \
-  __ret; \
-})
-#define vcvth_n_u16_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vcvth_n_u16_f16(__s0, __p1); \
-  __ret; \
-})
-#define vcvth_n_u32_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vcvth_n_u32_f16(__s0, __p1); \
-  __ret; \
-})
-#define vcvth_n_u64_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vcvth_n_u64_f16(__s0, __p1); \
-  __ret; \
-})
-#define vcvth_s16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vcvth_s16_f16(__s0); \
-  __ret; \
-})
-#define vcvth_s32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vcvth_s32_f16(__s0); \
-  __ret; \
-})
-#define vcvth_s64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vcvth_s64_f16(__s0); \
-  __ret; \
-})
-#define vcvth_u16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vcvth_u16_f16(__s0); \
-  __ret; \
-})
-#define vcvth_u32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vcvth_u32_f16(__s0); \
-  __ret; \
-})
-#define vcvth_u64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vcvth_u64_f16(__s0); \
-  __ret; \
-})
-#define vcvtah_s16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vcvtah_s16_f16(__s0); \
-  __ret; \
-})
-#define vcvtah_s32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vcvtah_s32_f16(__s0); \
-  __ret; \
-})
-#define vcvtah_s64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vcvtah_s64_f16(__s0); \
-  __ret; \
-})
-#define vcvtah_u16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vcvtah_u16_f16(__s0); \
-  __ret; \
-})
-#define vcvtah_u32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vcvtah_u32_f16(__s0); \
-  __ret; \
-})
-#define vcvtah_u64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vcvtah_u64_f16(__s0); \
-  __ret; \
-})
-__ai float16_t vcvth_f16_u32(uint32_t __p0) {
-  float16_t __ret;
-  __ret = (float16_t) __builtin_neon_vcvth_f16_u32(__p0);
-  return __ret;
-}
-__ai float16_t vcvth_f16_u64(uint64_t __p0) {
-  float16_t __ret;
-  __ret = (float16_t) __builtin_neon_vcvth_f16_u64(__p0);
-  return __ret;
-}
-__ai float16_t vcvth_f16_u16(uint16_t __p0) {
-  float16_t __ret;
-  __ret = (float16_t) __builtin_neon_vcvth_f16_u16(__p0);
-  return __ret;
-}
-__ai float16_t vcvth_f16_s32(int32_t __p0) {
-  float16_t __ret;
-  __ret = (float16_t) __builtin_neon_vcvth_f16_s32(__p0);
-  return __ret;
-}
-__ai float16_t vcvth_f16_s64(int64_t __p0) {
-  float16_t __ret;
-  __ret = (float16_t) __builtin_neon_vcvth_f16_s64(__p0);
-  return __ret;
-}
-__ai float16_t vcvth_f16_s16(int16_t __p0) {
-  float16_t __ret;
-  __ret = (float16_t) __builtin_neon_vcvth_f16_s16(__p0);
-  return __ret;
-}
-#define vcvth_n_f16_u32(__p0, __p1) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vcvth_n_f16_u32(__s0, __p1); \
-  __ret; \
-})
-#define vcvth_n_f16_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vcvth_n_f16_u64(__s0, __p1); \
-  __ret; \
-})
-#define vcvth_n_f16_u16(__p0, __p1) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vcvth_n_f16_u16(__s0, __p1); \
-  __ret; \
-})
-#define vcvth_n_f16_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vcvth_n_f16_s32(__s0, __p1); \
-  __ret; \
-})
-#define vcvth_n_f16_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vcvth_n_f16_s64(__s0, __p1); \
-  __ret; \
-})
-#define vcvth_n_f16_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vcvth_n_f16_s16(__s0, __p1); \
-  __ret; \
-})
-#define vcvtmh_s16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vcvtmh_s16_f16(__s0); \
-  __ret; \
-})
-#define vcvtmh_s32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vcvtmh_s32_f16(__s0); \
-  __ret; \
-})
-#define vcvtmh_s64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vcvtmh_s64_f16(__s0); \
-  __ret; \
-})
-#define vcvtmh_u16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vcvtmh_u16_f16(__s0); \
-  __ret; \
-})
-#define vcvtmh_u32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vcvtmh_u32_f16(__s0); \
-  __ret; \
-})
-#define vcvtmh_u64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vcvtmh_u64_f16(__s0); \
-  __ret; \
-})
-#define vcvtnh_s16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vcvtnh_s16_f16(__s0); \
-  __ret; \
-})
-#define vcvtnh_s32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vcvtnh_s32_f16(__s0); \
-  __ret; \
-})
-#define vcvtnh_s64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vcvtnh_s64_f16(__s0); \
-  __ret; \
-})
-#define vcvtnh_u16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vcvtnh_u16_f16(__s0); \
-  __ret; \
-})
-#define vcvtnh_u32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vcvtnh_u32_f16(__s0); \
-  __ret; \
-})
-#define vcvtnh_u64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vcvtnh_u64_f16(__s0); \
-  __ret; \
-})
-#define vcvtph_s16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vcvtph_s16_f16(__s0); \
-  __ret; \
-})
-#define vcvtph_s32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vcvtph_s32_f16(__s0); \
-  __ret; \
-})
-#define vcvtph_s64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vcvtph_s64_f16(__s0); \
-  __ret; \
-})
-#define vcvtph_u16_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vcvtph_u16_f16(__s0); \
-  __ret; \
-})
-#define vcvtph_u32_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vcvtph_u32_f16(__s0); \
-  __ret; \
-})
-#define vcvtph_u64_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vcvtph_u64_f16(__s0); \
-  __ret; \
-})
-#define vdivh_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vdivh_f16(__s0, __s1); \
-  __ret; \
-})
-#define vfmah_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_f16(__s0, __s1, __s2); \
-  __ret; \
-})
-#define vfmsh_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmsh_f16(__s0, __s1, __s2); \
-  __ret; \
-})
-#define vmaxh_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxh_f16(__s0, __s1); \
-  __ret; \
-})
-#define vmaxnmh_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxnmh_f16(__s0, __s1); \
-  __ret; \
-})
-#define vminh_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminh_f16(__s0, __s1); \
-  __ret; \
-})
-#define vminnmh_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminnmh_f16(__s0, __s1); \
-  __ret; \
-})
-#define vmulh_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulh_f16(__s0, __s1); \
-  __ret; \
-})
-#define vmulxh_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_f16(__s0, __s1); \
-  __ret; \
-})
-#define vnegh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vnegh_f16(__s0); \
-  __ret; \
-})
-#define vrecpeh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vrecpeh_f16(__s0); \
-  __ret; \
-})
-#define vrecpsh_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vrecpsh_f16(__s0, __s1); \
-  __ret; \
-})
-#define vrecpxh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vrecpxh_f16(__s0); \
-  __ret; \
-})
-#define vrndh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vrndh_f16(__s0); \
-  __ret; \
-})
-#define vrndah_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vrndah_f16(__s0); \
-  __ret; \
-})
-#define vrndih_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vrndih_f16(__s0); \
-  __ret; \
-})
-#define vrndmh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vrndmh_f16(__s0); \
-  __ret; \
-})
-#define vrndnh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vrndnh_f16(__s0); \
-  __ret; \
-})
-#define vrndph_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vrndph_f16(__s0); \
-  __ret; \
-})
-#define vrndxh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vrndxh_f16(__s0); \
-  __ret; \
-})
-#define vrsqrteh_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vrsqrteh_f16(__s0); \
-  __ret; \
-})
-#define vrsqrtsh_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vrsqrtsh_f16(__s0, __s1); \
-  __ret; \
-})
-#define vsqrth_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vsqrth_f16(__s0); \
-  __ret; \
-})
-#define vsubh_f16(__p0, __p1) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vsubh_f16(__s0, __s1); \
-  __ret; \
-})
-#endif
-
-#undef __ai
-
-#endif /* __ARM_FP16_H */
diff --git a/linux-x86/lib64/clang/10.0.1/include/arm_neon.h b/linux-x86/lib64/clang/10.0.1/include/arm_neon.h
deleted file mode 100644
index 6b55c4b..0000000
--- a/linux-x86/lib64/clang/10.0.1/include/arm_neon.h
+++ /dev/null
@@ -1,62864 +0,0 @@
-/*===---- arm_neon.h - ARM Neon intrinsics ---------------------------------===
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __ARM_NEON_H
-#define __ARM_NEON_H
-
-#if !defined(__ARM_NEON)
-#error "NEON support not enabled"
-#endif
-
-#include <stdint.h>
-
-typedef float float32_t;
-typedef __fp16 float16_t;
-#ifdef __aarch64__
-typedef double float64_t;
-#endif
-
-#ifdef __aarch64__
-typedef uint8_t poly8_t;
-typedef uint16_t poly16_t;
-typedef uint64_t poly64_t;
-typedef __uint128_t poly128_t;
-#else
-typedef int8_t poly8_t;
-typedef int16_t poly16_t;
-#endif
-typedef __attribute__((neon_vector_type(8))) int8_t int8x8_t;
-typedef __attribute__((neon_vector_type(16))) int8_t int8x16_t;
-typedef __attribute__((neon_vector_type(4))) int16_t int16x4_t;
-typedef __attribute__((neon_vector_type(8))) int16_t int16x8_t;
-typedef __attribute__((neon_vector_type(2))) int32_t int32x2_t;
-typedef __attribute__((neon_vector_type(4))) int32_t int32x4_t;
-typedef __attribute__((neon_vector_type(1))) int64_t int64x1_t;
-typedef __attribute__((neon_vector_type(2))) int64_t int64x2_t;
-typedef __attribute__((neon_vector_type(8))) uint8_t uint8x8_t;
-typedef __attribute__((neon_vector_type(16))) uint8_t uint8x16_t;
-typedef __attribute__((neon_vector_type(4))) uint16_t uint16x4_t;
-typedef __attribute__((neon_vector_type(8))) uint16_t uint16x8_t;
-typedef __attribute__((neon_vector_type(2))) uint32_t uint32x2_t;
-typedef __attribute__((neon_vector_type(4))) uint32_t uint32x4_t;
-typedef __attribute__((neon_vector_type(1))) uint64_t uint64x1_t;
-typedef __attribute__((neon_vector_type(2))) uint64_t uint64x2_t;
-typedef __attribute__((neon_vector_type(4))) float16_t float16x4_t;
-typedef __attribute__((neon_vector_type(8))) float16_t float16x8_t;
-typedef __attribute__((neon_vector_type(2))) float32_t float32x2_t;
-typedef __attribute__((neon_vector_type(4))) float32_t float32x4_t;
-#ifdef __aarch64__
-typedef __attribute__((neon_vector_type(1))) float64_t float64x1_t;
-typedef __attribute__((neon_vector_type(2))) float64_t float64x2_t;
-#endif
-typedef __attribute__((neon_polyvector_type(8))) poly8_t poly8x8_t;
-typedef __attribute__((neon_polyvector_type(16))) poly8_t poly8x16_t;
-typedef __attribute__((neon_polyvector_type(4))) poly16_t poly16x4_t;
-typedef __attribute__((neon_polyvector_type(8))) poly16_t poly16x8_t;
-#ifdef __aarch64__
-typedef __attribute__((neon_polyvector_type(1))) poly64_t poly64x1_t;
-typedef __attribute__((neon_polyvector_type(2))) poly64_t poly64x2_t;
-#endif
-
-typedef struct int8x8x2_t {
-  int8x8_t val[2];
-} int8x8x2_t;
-
-typedef struct int8x16x2_t {
-  int8x16_t val[2];
-} int8x16x2_t;
-
-typedef struct int16x4x2_t {
-  int16x4_t val[2];
-} int16x4x2_t;
-
-typedef struct int16x8x2_t {
-  int16x8_t val[2];
-} int16x8x2_t;
-
-typedef struct int32x2x2_t {
-  int32x2_t val[2];
-} int32x2x2_t;
-
-typedef struct int32x4x2_t {
-  int32x4_t val[2];
-} int32x4x2_t;
-
-typedef struct int64x1x2_t {
-  int64x1_t val[2];
-} int64x1x2_t;
-
-typedef struct int64x2x2_t {
-  int64x2_t val[2];
-} int64x2x2_t;
-
-typedef struct uint8x8x2_t {
-  uint8x8_t val[2];
-} uint8x8x2_t;
-
-typedef struct uint8x16x2_t {
-  uint8x16_t val[2];
-} uint8x16x2_t;
-
-typedef struct uint16x4x2_t {
-  uint16x4_t val[2];
-} uint16x4x2_t;
-
-typedef struct uint16x8x2_t {
-  uint16x8_t val[2];
-} uint16x8x2_t;
-
-typedef struct uint32x2x2_t {
-  uint32x2_t val[2];
-} uint32x2x2_t;
-
-typedef struct uint32x4x2_t {
-  uint32x4_t val[2];
-} uint32x4x2_t;
-
-typedef struct uint64x1x2_t {
-  uint64x1_t val[2];
-} uint64x1x2_t;
-
-typedef struct uint64x2x2_t {
-  uint64x2_t val[2];
-} uint64x2x2_t;
-
-typedef struct float16x4x2_t {
-  float16x4_t val[2];
-} float16x4x2_t;
-
-typedef struct float16x8x2_t {
-  float16x8_t val[2];
-} float16x8x2_t;
-
-typedef struct float32x2x2_t {
-  float32x2_t val[2];
-} float32x2x2_t;
-
-typedef struct float32x4x2_t {
-  float32x4_t val[2];
-} float32x4x2_t;
-
-#ifdef __aarch64__
-typedef struct float64x1x2_t {
-  float64x1_t val[2];
-} float64x1x2_t;
-
-typedef struct float64x2x2_t {
-  float64x2_t val[2];
-} float64x2x2_t;
-
-#endif
-typedef struct poly8x8x2_t {
-  poly8x8_t val[2];
-} poly8x8x2_t;
-
-typedef struct poly8x16x2_t {
-  poly8x16_t val[2];
-} poly8x16x2_t;
-
-typedef struct poly16x4x2_t {
-  poly16x4_t val[2];
-} poly16x4x2_t;
-
-typedef struct poly16x8x2_t {
-  poly16x8_t val[2];
-} poly16x8x2_t;
-
-#ifdef __aarch64__
-typedef struct poly64x1x2_t {
-  poly64x1_t val[2];
-} poly64x1x2_t;
-
-typedef struct poly64x2x2_t {
-  poly64x2_t val[2];
-} poly64x2x2_t;
-
-#endif
-typedef struct int8x8x3_t {
-  int8x8_t val[3];
-} int8x8x3_t;
-
-typedef struct int8x16x3_t {
-  int8x16_t val[3];
-} int8x16x3_t;
-
-typedef struct int16x4x3_t {
-  int16x4_t val[3];
-} int16x4x3_t;
-
-typedef struct int16x8x3_t {
-  int16x8_t val[3];
-} int16x8x3_t;
-
-typedef struct int32x2x3_t {
-  int32x2_t val[3];
-} int32x2x3_t;
-
-typedef struct int32x4x3_t {
-  int32x4_t val[3];
-} int32x4x3_t;
-
-typedef struct int64x1x3_t {
-  int64x1_t val[3];
-} int64x1x3_t;
-
-typedef struct int64x2x3_t {
-  int64x2_t val[3];
-} int64x2x3_t;
-
-typedef struct uint8x8x3_t {
-  uint8x8_t val[3];
-} uint8x8x3_t;
-
-typedef struct uint8x16x3_t {
-  uint8x16_t val[3];
-} uint8x16x3_t;
-
-typedef struct uint16x4x3_t {
-  uint16x4_t val[3];
-} uint16x4x3_t;
-
-typedef struct uint16x8x3_t {
-  uint16x8_t val[3];
-} uint16x8x3_t;
-
-typedef struct uint32x2x3_t {
-  uint32x2_t val[3];
-} uint32x2x3_t;
-
-typedef struct uint32x4x3_t {
-  uint32x4_t val[3];
-} uint32x4x3_t;
-
-typedef struct uint64x1x3_t {
-  uint64x1_t val[3];
-} uint64x1x3_t;
-
-typedef struct uint64x2x3_t {
-  uint64x2_t val[3];
-} uint64x2x3_t;
-
-typedef struct float16x4x3_t {
-  float16x4_t val[3];
-} float16x4x3_t;
-
-typedef struct float16x8x3_t {
-  float16x8_t val[3];
-} float16x8x3_t;
-
-typedef struct float32x2x3_t {
-  float32x2_t val[3];
-} float32x2x3_t;
-
-typedef struct float32x4x3_t {
-  float32x4_t val[3];
-} float32x4x3_t;
-
-#ifdef __aarch64__
-typedef struct float64x1x3_t {
-  float64x1_t val[3];
-} float64x1x3_t;
-
-typedef struct float64x2x3_t {
-  float64x2_t val[3];
-} float64x2x3_t;
-
-#endif
-typedef struct poly8x8x3_t {
-  poly8x8_t val[3];
-} poly8x8x3_t;
-
-typedef struct poly8x16x3_t {
-  poly8x16_t val[3];
-} poly8x16x3_t;
-
-typedef struct poly16x4x3_t {
-  poly16x4_t val[3];
-} poly16x4x3_t;
-
-typedef struct poly16x8x3_t {
-  poly16x8_t val[3];
-} poly16x8x3_t;
-
-#ifdef __aarch64__
-typedef struct poly64x1x3_t {
-  poly64x1_t val[3];
-} poly64x1x3_t;
-
-typedef struct poly64x2x3_t {
-  poly64x2_t val[3];
-} poly64x2x3_t;
-
-#endif
-typedef struct int8x8x4_t {
-  int8x8_t val[4];
-} int8x8x4_t;
-
-typedef struct int8x16x4_t {
-  int8x16_t val[4];
-} int8x16x4_t;
-
-typedef struct int16x4x4_t {
-  int16x4_t val[4];
-} int16x4x4_t;
-
-typedef struct int16x8x4_t {
-  int16x8_t val[4];
-} int16x8x4_t;
-
-typedef struct int32x2x4_t {
-  int32x2_t val[4];
-} int32x2x4_t;
-
-typedef struct int32x4x4_t {
-  int32x4_t val[4];
-} int32x4x4_t;
-
-typedef struct int64x1x4_t {
-  int64x1_t val[4];
-} int64x1x4_t;
-
-typedef struct int64x2x4_t {
-  int64x2_t val[4];
-} int64x2x4_t;
-
-typedef struct uint8x8x4_t {
-  uint8x8_t val[4];
-} uint8x8x4_t;
-
-typedef struct uint8x16x4_t {
-  uint8x16_t val[4];
-} uint8x16x4_t;
-
-typedef struct uint16x4x4_t {
-  uint16x4_t val[4];
-} uint16x4x4_t;
-
-typedef struct uint16x8x4_t {
-  uint16x8_t val[4];
-} uint16x8x4_t;
-
-typedef struct uint32x2x4_t {
-  uint32x2_t val[4];
-} uint32x2x4_t;
-
-typedef struct uint32x4x4_t {
-  uint32x4_t val[4];
-} uint32x4x4_t;
-
-typedef struct uint64x1x4_t {
-  uint64x1_t val[4];
-} uint64x1x4_t;
-
-typedef struct uint64x2x4_t {
-  uint64x2_t val[4];
-} uint64x2x4_t;
-
-typedef struct float16x4x4_t {
-  float16x4_t val[4];
-} float16x4x4_t;
-
-typedef struct float16x8x4_t {
-  float16x8_t val[4];
-} float16x8x4_t;
-
-typedef struct float32x2x4_t {
-  float32x2_t val[4];
-} float32x2x4_t;
-
-typedef struct float32x4x4_t {
-  float32x4_t val[4];
-} float32x4x4_t;
-
-#ifdef __aarch64__
-typedef struct float64x1x4_t {
-  float64x1_t val[4];
-} float64x1x4_t;
-
-typedef struct float64x2x4_t {
-  float64x2_t val[4];
-} float64x2x4_t;
-
-#endif
-typedef struct poly8x8x4_t {
-  poly8x8_t val[4];
-} poly8x8x4_t;
-
-typedef struct poly8x16x4_t {
-  poly8x16_t val[4];
-} poly8x16x4_t;
-
-typedef struct poly16x4x4_t {
-  poly16x4_t val[4];
-} poly16x4x4_t;
-
-typedef struct poly16x8x4_t {
-  poly16x8_t val[4];
-} poly16x8x4_t;
-
-#ifdef __aarch64__
-typedef struct poly64x1x4_t {
-  poly64x1_t val[4];
-} poly64x1x4_t;
-
-typedef struct poly64x2x4_t {
-  poly64x2_t val[4];
-} poly64x2x4_t;
-
-#endif
-
-#define __ai static __inline__ __attribute__((__always_inline__, __nodebug__))
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x16_t __noswap_vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x16_t __noswap_vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vabd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vabd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vabd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vabsq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vabsq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vabsq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vabsq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabsq_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vabsq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabsq_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vabsq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vabs_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vabs_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vabs_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vabs_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vabs_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vabs_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vabs_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vabs_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vadd_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vand_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vand_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vbic_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vbic_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 5);
-  return __ret;
-}
-#else
-__ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 37);
-  return __ret;
-}
-#else
-__ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 37);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vbsl_u64(uint64x1_t __p0, uint64x1_t __p1, uint64x1_t __p2) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vbsl_s64(uint64x1_t __p0, int64x1_t __p1, int64x1_t __p2) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vclsq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vclsq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vclsq_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vclsq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vclsq_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vclsq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vcls_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vcls_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcls_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcls_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcls_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcls_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vclzq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vclzq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vclzq_u32(uint32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vclzq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vclzq_u16(uint16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vclzq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vclzq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vclzq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vclzq_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vclzq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vclzq_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vclzq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vclz_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vclz_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vclz_u32(uint32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vclz_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclz_u16(uint16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclz_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vclz_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vclz_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vclz_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vclz_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vclz_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vclz_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vcnt_p8(poly8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vcnt_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vcntq_p8(poly8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vcntq_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcntq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcntq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vcntq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vcntq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcnt_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcnt_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vcnt_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vcnt_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#else
-__ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x16_t __noswap_vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#else
-__ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x16_t __noswap_vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x8_t __noswap_vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  return __ret;
-}
-#else
-__ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-__ai poly8x8_t vcreate_p8(uint64_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vcreate_p16(uint64_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vcreate_u8(uint64_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vcreate_u32(uint64_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vcreate_u64(uint64_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vcreate_u16(uint64_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vcreate_s8(uint64_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vcreate_f32(uint64_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vcreate_f16(uint64_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vcreate_s32(uint64_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vcreate_s64(uint64_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vcreate_s16(uint64_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcvt_f32_s32(int32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai float32x2_t vcvt_f32_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcvt_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcvt_s32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x16_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vdup_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vdup_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vdup_n_p8(poly8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly8x8_t vdup_n_p8(poly8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vdup_n_p16(poly16_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly16x4_t vdup_n_p16(poly16_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vdupq_n_p8(poly8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly8x16_t vdupq_n_p8(poly8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vdupq_n_p16(poly16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly16x8_t vdupq_n_p16(poly16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vdupq_n_u8(uint8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint8x16_t vdupq_n_u8(uint8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vdupq_n_u32(uint32_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint32x4_t vdupq_n_u32(uint32_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vdupq_n_u64(uint64_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai uint64x2_t vdupq_n_u64(uint64_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vdupq_n_u16(uint16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint16x8_t vdupq_n_u16(uint16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vdupq_n_s8(int8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int8x16_t vdupq_n_s8(int8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vdupq_n_f32(float32_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai float32x4_t vdupq_n_f32(float32_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vdupq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vdupq_n_s32(int32_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int32x4_t vdupq_n_s32(int32_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vdupq_n_s64(int64_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai int64x2_t vdupq_n_s64(int64_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vdupq_n_s16(int16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int16x8_t vdupq_n_s16(int16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vdup_n_u8(uint8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint8x8_t vdup_n_u8(uint8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vdup_n_u32(uint32_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai uint32x2_t vdup_n_u32(uint32_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vdup_n_u64(uint64_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vdup_n_u16(uint16_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint16x4_t vdup_n_u16(uint16_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vdup_n_s8(int8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int8x8_t vdup_n_s8(int8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vdup_n_f32(float32_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai float32x2_t vdup_n_f32(float32_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vdup_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vdup_n_s32(int32_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai int32x2_t vdup_n_s32(int32_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vdup_n_s64(int64_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vdup_n_s16(int16_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int16x4_t vdup_n_s16(int16_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t veor_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t veor_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
-  __ret; \
-})
-#else
-#define vext_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
-  __ret; \
-})
-#else
-#define vext_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
-  __ret; \
-})
-#else
-#define vextq_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
-  __ret; \
-})
-#else
-#define vextq_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
-  __ret; \
-})
-#else
-#define vextq_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
-  __ret; \
-})
-#else
-#define vextq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
-  __ret; \
-})
-#else
-#define vextq_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
-  __ret; \
-})
-#else
-#define vextq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
-  __ret; \
-})
-#else
-#define vextq_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 41); \
-  __ret; \
-})
-#else
-#define vextq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vextq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
-  __ret; \
-})
-#else
-#define vextq_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vextq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
-  __ret; \
-})
-#else
-#define vext_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
-  __ret; \
-})
-#else
-#define vext_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vext_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vext_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
-  __ret; \
-})
-#else
-#define vext_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
-  __ret; \
-})
-#else
-#define vext_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 9); \
-  __ret; \
-})
-#else
-#define vext_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vext_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vext_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vext_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vext_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vget_high_p8(poly8x16_t __p0) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#else
-__ai poly8x8_t vget_high_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai poly8x8_t __noswap_vget_high_p8(poly8x16_t __p0) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vget_high_p16(poly16x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai poly16x4_t vget_high_p16(poly16x8_t __p0) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vget_high_u8(uint8x16_t __p0) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#else
-__ai uint8x8_t vget_high_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vget_high_u8(uint8x16_t __p0) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vget_high_u32(uint32x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
-  return __ret;
-}
-#else
-__ai uint32x2_t vget_high_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vget_high_u32(uint32x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x1_t vget_high_u64(uint64x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1);
-  return __ret;
-}
-#else
-__ai uint64x1_t vget_high_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vget_high_u16(uint16x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai uint16x4_t vget_high_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vget_high_u16(uint16x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vget_high_s8(int8x16_t __p0) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#else
-__ai int8x8_t vget_high_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vget_high_s8(int8x16_t __p0) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vget_high_f32(float32x4_t __p0) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
-  return __ret;
-}
-#else
-__ai float32x2_t vget_high_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vget_high_f32(float32x4_t __p0) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vget_high_f16(float16x8_t __p0) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai float16x4_t vget_high_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vget_high_f16(float16x8_t __p0) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vget_high_s32(int32x4_t __p0) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
-  return __ret;
-}
-#else
-__ai int32x2_t vget_high_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vget_high_s32(int32x4_t __p0) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x1_t vget_high_s64(int64x2_t __p0) {
-  int64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1);
-  return __ret;
-}
-#else
-__ai int64x1_t vget_high_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vget_high_s16(int16x8_t __p0) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai int16x4_t vget_high_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vget_high_s16(int16x8_t __p0) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vgetq_lane_f32((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vgetq_lane_f32((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vgetq_lane_f32((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vget_lane_i32((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vget_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vget_lane_f32((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vget_lane_f32((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vget_lane_f32((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vget_lane_i32((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vget_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vget_low_p8(poly8x16_t __p0) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai poly8x8_t vget_low_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vget_low_p16(poly16x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai poly16x4_t vget_low_p16(poly16x8_t __p0) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vget_low_u8(uint8x16_t __p0) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai uint8x8_t vget_low_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vget_low_u32(uint32x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vget_low_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x1_t vget_low_u64(uint64x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0);
-  return __ret;
-}
-#else
-__ai uint64x1_t vget_low_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vget_low_u16(uint16x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai uint16x4_t vget_low_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vget_low_s8(int8x16_t __p0) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai int8x8_t vget_low_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vget_low_f32(float32x4_t __p0) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
-  return __ret;
-}
-#else
-__ai float32x2_t vget_low_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vget_low_f16(float16x8_t __p0) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai float16x4_t vget_low_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vget_low_s32(int32x4_t __p0) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
-  return __ret;
-}
-#else
-__ai int32x2_t vget_low_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x1_t vget_low_s64(int64x2_t __p0) {
-  int64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0);
-  return __ret;
-}
-#else
-__ai int64x1_t vget_low_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vget_low_s16(int16x8_t __p0) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai int16x4_t vget_low_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p8(__p0) __extension__ ({ \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \
-  __ret; \
-})
-#else
-#define vld1_p8(__p0) __extension__ ({ \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p16(__p0) __extension__ ({ \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \
-  __ret; \
-})
-#else
-#define vld1_p16(__p0) __extension__ ({ \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p8(__p0) __extension__ ({ \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \
-  __ret; \
-})
-#else
-#define vld1q_p8(__p0) __extension__ ({ \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p16(__p0) __extension__ ({ \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \
-  __ret; \
-})
-#else
-#define vld1q_p16(__p0) __extension__ ({ \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u8(__p0) __extension__ ({ \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \
-  __ret; \
-})
-#else
-#define vld1q_u8(__p0) __extension__ ({ \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u32(__p0) __extension__ ({ \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \
-  __ret; \
-})
-#else
-#define vld1q_u32(__p0) __extension__ ({ \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u64(__p0) __extension__ ({ \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \
-  __ret; \
-})
-#else
-#define vld1q_u64(__p0) __extension__ ({ \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u16(__p0) __extension__ ({ \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \
-  __ret; \
-})
-#else
-#define vld1q_u16(__p0) __extension__ ({ \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s8(__p0) __extension__ ({ \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \
-  __ret; \
-})
-#else
-#define vld1q_s8(__p0) __extension__ ({ \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f32(__p0) __extension__ ({ \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \
-  __ret; \
-})
-#else
-#define vld1q_f32(__p0) __extension__ ({ \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s32(__p0) __extension__ ({ \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \
-  __ret; \
-})
-#else
-#define vld1q_s32(__p0) __extension__ ({ \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s64(__p0) __extension__ ({ \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \
-  __ret; \
-})
-#else
-#define vld1q_s64(__p0) __extension__ ({ \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s16(__p0) __extension__ ({ \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \
-  __ret; \
-})
-#else
-#define vld1q_s16(__p0) __extension__ ({ \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u8(__p0) __extension__ ({ \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \
-  __ret; \
-})
-#else
-#define vld1_u8(__p0) __extension__ ({ \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u32(__p0) __extension__ ({ \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \
-  __ret; \
-})
-#else
-#define vld1_u32(__p0) __extension__ ({ \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_u64(__p0) __extension__ ({ \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vld1_v(__p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u16(__p0) __extension__ ({ \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \
-  __ret; \
-})
-#else
-#define vld1_u16(__p0) __extension__ ({ \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s8(__p0) __extension__ ({ \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \
-  __ret; \
-})
-#else
-#define vld1_s8(__p0) __extension__ ({ \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f32(__p0) __extension__ ({ \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \
-  __ret; \
-})
-#else
-#define vld1_f32(__p0) __extension__ ({ \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s32(__p0) __extension__ ({ \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \
-  __ret; \
-})
-#else
-#define vld1_s32(__p0) __extension__ ({ \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_s64(__p0) __extension__ ({ \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vld1_v(__p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s16(__p0) __extension__ ({ \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \
-  __ret; \
-})
-#else
-#define vld1_s16(__p0) __extension__ ({ \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_p8(__p0) __extension__ ({ \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \
-  __ret; \
-})
-#else
-#define vld1_dup_p8(__p0) __extension__ ({ \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_p16(__p0) __extension__ ({ \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \
-  __ret; \
-})
-#else
-#define vld1_dup_p16(__p0) __extension__ ({ \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_p8(__p0) __extension__ ({ \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \
-  __ret; \
-})
-#else
-#define vld1q_dup_p8(__p0) __extension__ ({ \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_p16(__p0) __extension__ ({ \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \
-  __ret; \
-})
-#else
-#define vld1q_dup_p16(__p0) __extension__ ({ \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_u8(__p0) __extension__ ({ \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \
-  __ret; \
-})
-#else
-#define vld1q_dup_u8(__p0) __extension__ ({ \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_u32(__p0) __extension__ ({ \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \
-  __ret; \
-})
-#else
-#define vld1q_dup_u32(__p0) __extension__ ({ \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_u64(__p0) __extension__ ({ \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \
-  __ret; \
-})
-#else
-#define vld1q_dup_u64(__p0) __extension__ ({ \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_u16(__p0) __extension__ ({ \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \
-  __ret; \
-})
-#else
-#define vld1q_dup_u16(__p0) __extension__ ({ \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_s8(__p0) __extension__ ({ \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \
-  __ret; \
-})
-#else
-#define vld1q_dup_s8(__p0) __extension__ ({ \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_f32(__p0) __extension__ ({ \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \
-  __ret; \
-})
-#else
-#define vld1q_dup_f32(__p0) __extension__ ({ \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_s32(__p0) __extension__ ({ \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \
-  __ret; \
-})
-#else
-#define vld1q_dup_s32(__p0) __extension__ ({ \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_s64(__p0) __extension__ ({ \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \
-  __ret; \
-})
-#else
-#define vld1q_dup_s64(__p0) __extension__ ({ \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_s16(__p0) __extension__ ({ \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \
-  __ret; \
-})
-#else
-#define vld1q_dup_s16(__p0) __extension__ ({ \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_u8(__p0) __extension__ ({ \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \
-  __ret; \
-})
-#else
-#define vld1_dup_u8(__p0) __extension__ ({ \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_u32(__p0) __extension__ ({ \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \
-  __ret; \
-})
-#else
-#define vld1_dup_u32(__p0) __extension__ ({ \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_dup_u64(__p0) __extension__ ({ \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vld1_dup_v(__p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_u16(__p0) __extension__ ({ \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \
-  __ret; \
-})
-#else
-#define vld1_dup_u16(__p0) __extension__ ({ \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_s8(__p0) __extension__ ({ \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \
-  __ret; \
-})
-#else
-#define vld1_dup_s8(__p0) __extension__ ({ \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_f32(__p0) __extension__ ({ \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \
-  __ret; \
-})
-#else
-#define vld1_dup_f32(__p0) __extension__ ({ \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_s32(__p0) __extension__ ({ \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \
-  __ret; \
-})
-#else
-#define vld1_dup_s32(__p0) __extension__ ({ \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_dup_s64(__p0) __extension__ ({ \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vld1_dup_v(__p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_s16(__p0) __extension__ ({ \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \
-  __ret; \
-})
-#else
-#define vld1_dup_s16(__p0) __extension__ ({ \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \
-  __ret; \
-})
-#else
-#define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \
-  __ret; \
-})
-#else
-#define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \
-  __ret; \
-})
-#else
-#define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \
-  __ret; \
-})
-#else
-#define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \
-  __ret; \
-})
-#else
-#define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \
-  __ret; \
-})
-#else
-#define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \
-  __ret; \
-})
-#else
-#define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \
-  __ret; \
-})
-#else
-#define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \
-  __ret; \
-})
-#else
-#define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \
-  __ret; \
-})
-#else
-#define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \
-  __ret; \
-})
-#else
-#define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \
-  __ret; \
-})
-#else
-#define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \
-  __ret; \
-})
-#else
-#define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \
-  __ret; \
-})
-#else
-#define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \
-  __ret; \
-})
-#else
-#define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \
-  __ret; \
-})
-#else
-#define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p8_x2(__p0) __extension__ ({ \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld1_p8_x2(__p0) __extension__ ({ \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p16_x2(__p0) __extension__ ({ \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld1_p16_x2(__p0) __extension__ ({ \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p8_x2(__p0) __extension__ ({ \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld1q_p8_x2(__p0) __extension__ ({ \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p16_x2(__p0) __extension__ ({ \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld1q_p16_x2(__p0) __extension__ ({ \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u8_x2(__p0) __extension__ ({ \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld1q_u8_x2(__p0) __extension__ ({ \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u32_x2(__p0) __extension__ ({ \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld1q_u32_x2(__p0) __extension__ ({ \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u64_x2(__p0) __extension__ ({ \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld1q_u64_x2(__p0) __extension__ ({ \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u16_x2(__p0) __extension__ ({ \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld1q_u16_x2(__p0) __extension__ ({ \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s8_x2(__p0) __extension__ ({ \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld1q_s8_x2(__p0) __extension__ ({ \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f32_x2(__p0) __extension__ ({ \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld1q_f32_x2(__p0) __extension__ ({ \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s32_x2(__p0) __extension__ ({ \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld1q_s32_x2(__p0) __extension__ ({ \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s64_x2(__p0) __extension__ ({ \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld1q_s64_x2(__p0) __extension__ ({ \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s16_x2(__p0) __extension__ ({ \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld1q_s16_x2(__p0) __extension__ ({ \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u8_x2(__p0) __extension__ ({ \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld1_u8_x2(__p0) __extension__ ({ \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u32_x2(__p0) __extension__ ({ \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld1_u32_x2(__p0) __extension__ ({ \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_u64_x2(__p0) __extension__ ({ \
-  uint64x1x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u16_x2(__p0) __extension__ ({ \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld1_u16_x2(__p0) __extension__ ({ \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s8_x2(__p0) __extension__ ({ \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld1_s8_x2(__p0) __extension__ ({ \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f32_x2(__p0) __extension__ ({ \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld1_f32_x2(__p0) __extension__ ({ \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s32_x2(__p0) __extension__ ({ \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld1_s32_x2(__p0) __extension__ ({ \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_s64_x2(__p0) __extension__ ({ \
-  int64x1x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s16_x2(__p0) __extension__ ({ \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld1_s16_x2(__p0) __extension__ ({ \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p8_x3(__p0) __extension__ ({ \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld1_p8_x3(__p0) __extension__ ({ \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p16_x3(__p0) __extension__ ({ \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld1_p16_x3(__p0) __extension__ ({ \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p8_x3(__p0) __extension__ ({ \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld1q_p8_x3(__p0) __extension__ ({ \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p16_x3(__p0) __extension__ ({ \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld1q_p16_x3(__p0) __extension__ ({ \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u8_x3(__p0) __extension__ ({ \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld1q_u8_x3(__p0) __extension__ ({ \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u32_x3(__p0) __extension__ ({ \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld1q_u32_x3(__p0) __extension__ ({ \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u64_x3(__p0) __extension__ ({ \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld1q_u64_x3(__p0) __extension__ ({ \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u16_x3(__p0) __extension__ ({ \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld1q_u16_x3(__p0) __extension__ ({ \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s8_x3(__p0) __extension__ ({ \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld1q_s8_x3(__p0) __extension__ ({ \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f32_x3(__p0) __extension__ ({ \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld1q_f32_x3(__p0) __extension__ ({ \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s32_x3(__p0) __extension__ ({ \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld1q_s32_x3(__p0) __extension__ ({ \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s64_x3(__p0) __extension__ ({ \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld1q_s64_x3(__p0) __extension__ ({ \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s16_x3(__p0) __extension__ ({ \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld1q_s16_x3(__p0) __extension__ ({ \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u8_x3(__p0) __extension__ ({ \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld1_u8_x3(__p0) __extension__ ({ \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u32_x3(__p0) __extension__ ({ \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld1_u32_x3(__p0) __extension__ ({ \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_u64_x3(__p0) __extension__ ({ \
-  uint64x1x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u16_x3(__p0) __extension__ ({ \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld1_u16_x3(__p0) __extension__ ({ \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s8_x3(__p0) __extension__ ({ \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld1_s8_x3(__p0) __extension__ ({ \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f32_x3(__p0) __extension__ ({ \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld1_f32_x3(__p0) __extension__ ({ \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s32_x3(__p0) __extension__ ({ \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld1_s32_x3(__p0) __extension__ ({ \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_s64_x3(__p0) __extension__ ({ \
-  int64x1x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s16_x3(__p0) __extension__ ({ \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld1_s16_x3(__p0) __extension__ ({ \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p8_x4(__p0) __extension__ ({ \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld1_p8_x4(__p0) __extension__ ({ \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p16_x4(__p0) __extension__ ({ \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld1_p16_x4(__p0) __extension__ ({ \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p8_x4(__p0) __extension__ ({ \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld1q_p8_x4(__p0) __extension__ ({ \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p16_x4(__p0) __extension__ ({ \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld1q_p16_x4(__p0) __extension__ ({ \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u8_x4(__p0) __extension__ ({ \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld1q_u8_x4(__p0) __extension__ ({ \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u32_x4(__p0) __extension__ ({ \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld1q_u32_x4(__p0) __extension__ ({ \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u64_x4(__p0) __extension__ ({ \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld1q_u64_x4(__p0) __extension__ ({ \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u16_x4(__p0) __extension__ ({ \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld1q_u16_x4(__p0) __extension__ ({ \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s8_x4(__p0) __extension__ ({ \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld1q_s8_x4(__p0) __extension__ ({ \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f32_x4(__p0) __extension__ ({ \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld1q_f32_x4(__p0) __extension__ ({ \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s32_x4(__p0) __extension__ ({ \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld1q_s32_x4(__p0) __extension__ ({ \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s64_x4(__p0) __extension__ ({ \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld1q_s64_x4(__p0) __extension__ ({ \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s16_x4(__p0) __extension__ ({ \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld1q_s16_x4(__p0) __extension__ ({ \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u8_x4(__p0) __extension__ ({ \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld1_u8_x4(__p0) __extension__ ({ \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u32_x4(__p0) __extension__ ({ \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld1_u32_x4(__p0) __extension__ ({ \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_u64_x4(__p0) __extension__ ({ \
-  uint64x1x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u16_x4(__p0) __extension__ ({ \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld1_u16_x4(__p0) __extension__ ({ \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s8_x4(__p0) __extension__ ({ \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld1_s8_x4(__p0) __extension__ ({ \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f32_x4(__p0) __extension__ ({ \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld1_f32_x4(__p0) __extension__ ({ \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s32_x4(__p0) __extension__ ({ \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld1_s32_x4(__p0) __extension__ ({ \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_s64_x4(__p0) __extension__ ({ \
-  int64x1x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s16_x4(__p0) __extension__ ({ \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld1_s16_x4(__p0) __extension__ ({ \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_p8(__p0) __extension__ ({ \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld2_p8(__p0) __extension__ ({ \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_p16(__p0) __extension__ ({ \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld2_p16(__p0) __extension__ ({ \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_p8(__p0) __extension__ ({ \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld2q_p8(__p0) __extension__ ({ \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_p16(__p0) __extension__ ({ \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld2q_p16(__p0) __extension__ ({ \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_u8(__p0) __extension__ ({ \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld2q_u8(__p0) __extension__ ({ \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_u32(__p0) __extension__ ({ \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld2q_u32(__p0) __extension__ ({ \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_u16(__p0) __extension__ ({ \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld2q_u16(__p0) __extension__ ({ \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_s8(__p0) __extension__ ({ \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld2q_s8(__p0) __extension__ ({ \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_f32(__p0) __extension__ ({ \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld2q_f32(__p0) __extension__ ({ \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_s32(__p0) __extension__ ({ \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld2q_s32(__p0) __extension__ ({ \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_s16(__p0) __extension__ ({ \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld2q_s16(__p0) __extension__ ({ \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_u8(__p0) __extension__ ({ \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld2_u8(__p0) __extension__ ({ \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_u32(__p0) __extension__ ({ \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld2_u32(__p0) __extension__ ({ \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_u64(__p0) __extension__ ({ \
-  uint64x1x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2_u16(__p0) __extension__ ({ \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld2_u16(__p0) __extension__ ({ \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_s8(__p0) __extension__ ({ \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld2_s8(__p0) __extension__ ({ \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_f32(__p0) __extension__ ({ \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld2_f32(__p0) __extension__ ({ \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_s32(__p0) __extension__ ({ \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld2_s32(__p0) __extension__ ({ \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_s64(__p0) __extension__ ({ \
-  int64x1x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2_s16(__p0) __extension__ ({ \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld2_s16(__p0) __extension__ ({ \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_p8(__p0) __extension__ ({ \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld2_dup_p8(__p0) __extension__ ({ \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_p16(__p0) __extension__ ({ \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld2_dup_p16(__p0) __extension__ ({ \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_p8(__p0) __extension__ ({ \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld2q_dup_p8(__p0) __extension__ ({ \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_p16(__p0) __extension__ ({ \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld2q_dup_p16(__p0) __extension__ ({ \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_u8(__p0) __extension__ ({ \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld2q_dup_u8(__p0) __extension__ ({ \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_u32(__p0) __extension__ ({ \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld2q_dup_u32(__p0) __extension__ ({ \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_u64(__p0) __extension__ ({ \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld2q_dup_u64(__p0) __extension__ ({ \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_u16(__p0) __extension__ ({ \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld2q_dup_u16(__p0) __extension__ ({ \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_s8(__p0) __extension__ ({ \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld2q_dup_s8(__p0) __extension__ ({ \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_f32(__p0) __extension__ ({ \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld2q_dup_f32(__p0) __extension__ ({ \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_s32(__p0) __extension__ ({ \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld2q_dup_s32(__p0) __extension__ ({ \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_s64(__p0) __extension__ ({ \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld2q_dup_s64(__p0) __extension__ ({ \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_s16(__p0) __extension__ ({ \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld2q_dup_s16(__p0) __extension__ ({ \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_u8(__p0) __extension__ ({ \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld2_dup_u8(__p0) __extension__ ({ \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_u32(__p0) __extension__ ({ \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld2_dup_u32(__p0) __extension__ ({ \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_dup_u64(__p0) __extension__ ({ \
-  uint64x1x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_u16(__p0) __extension__ ({ \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld2_dup_u16(__p0) __extension__ ({ \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_s8(__p0) __extension__ ({ \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld2_dup_s8(__p0) __extension__ ({ \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_f32(__p0) __extension__ ({ \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld2_dup_f32(__p0) __extension__ ({ \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_s32(__p0) __extension__ ({ \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld2_dup_s32(__p0) __extension__ ({ \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_dup_s64(__p0) __extension__ ({ \
-  int64x1x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_s16(__p0) __extension__ ({ \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld2_dup_s16(__p0) __extension__ ({ \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \
-  __ret; \
-})
-#else
-#define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  poly8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \
-  __ret; \
-})
-#else
-#define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  poly16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \
-  __ret; \
-})
-#else
-#define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  poly16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \
-  __ret; \
-})
-#else
-#define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  uint32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \
-  __ret; \
-})
-#else
-#define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  uint16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 41); \
-  __ret; \
-})
-#else
-#define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  float32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 34); \
-  __ret; \
-})
-#else
-#define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  int32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 33); \
-  __ret; \
-})
-#else
-#define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  int16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \
-  __ret; \
-})
-#else
-#define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  uint8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \
-  __ret; \
-})
-#else
-#define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  uint32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \
-  __ret; \
-})
-#else
-#define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  uint16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \
-  __ret; \
-})
-#else
-#define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  int8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 9); \
-  __ret; \
-})
-#else
-#define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  float32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 2); \
-  __ret; \
-})
-#else
-#define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  int32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 1); \
-  __ret; \
-})
-#else
-#define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  int16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_p8(__p0) __extension__ ({ \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld3_p8(__p0) __extension__ ({ \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_p16(__p0) __extension__ ({ \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld3_p16(__p0) __extension__ ({ \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_p8(__p0) __extension__ ({ \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld3q_p8(__p0) __extension__ ({ \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_p16(__p0) __extension__ ({ \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld3q_p16(__p0) __extension__ ({ \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_u8(__p0) __extension__ ({ \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld3q_u8(__p0) __extension__ ({ \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_u32(__p0) __extension__ ({ \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld3q_u32(__p0) __extension__ ({ \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_u16(__p0) __extension__ ({ \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld3q_u16(__p0) __extension__ ({ \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_s8(__p0) __extension__ ({ \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld3q_s8(__p0) __extension__ ({ \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_f32(__p0) __extension__ ({ \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld3q_f32(__p0) __extension__ ({ \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_s32(__p0) __extension__ ({ \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld3q_s32(__p0) __extension__ ({ \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_s16(__p0) __extension__ ({ \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld3q_s16(__p0) __extension__ ({ \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_u8(__p0) __extension__ ({ \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld3_u8(__p0) __extension__ ({ \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_u32(__p0) __extension__ ({ \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld3_u32(__p0) __extension__ ({ \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_u64(__p0) __extension__ ({ \
-  uint64x1x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3_u16(__p0) __extension__ ({ \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld3_u16(__p0) __extension__ ({ \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_s8(__p0) __extension__ ({ \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld3_s8(__p0) __extension__ ({ \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_f32(__p0) __extension__ ({ \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld3_f32(__p0) __extension__ ({ \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_s32(__p0) __extension__ ({ \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld3_s32(__p0) __extension__ ({ \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_s64(__p0) __extension__ ({ \
-  int64x1x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3_s16(__p0) __extension__ ({ \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld3_s16(__p0) __extension__ ({ \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_p8(__p0) __extension__ ({ \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld3_dup_p8(__p0) __extension__ ({ \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_p16(__p0) __extension__ ({ \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld3_dup_p16(__p0) __extension__ ({ \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_p8(__p0) __extension__ ({ \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld3q_dup_p8(__p0) __extension__ ({ \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_p16(__p0) __extension__ ({ \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld3q_dup_p16(__p0) __extension__ ({ \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_u8(__p0) __extension__ ({ \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld3q_dup_u8(__p0) __extension__ ({ \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_u32(__p0) __extension__ ({ \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld3q_dup_u32(__p0) __extension__ ({ \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_u64(__p0) __extension__ ({ \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld3q_dup_u64(__p0) __extension__ ({ \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_u16(__p0) __extension__ ({ \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld3q_dup_u16(__p0) __extension__ ({ \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_s8(__p0) __extension__ ({ \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld3q_dup_s8(__p0) __extension__ ({ \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_f32(__p0) __extension__ ({ \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld3q_dup_f32(__p0) __extension__ ({ \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_s32(__p0) __extension__ ({ \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld3q_dup_s32(__p0) __extension__ ({ \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_s64(__p0) __extension__ ({ \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld3q_dup_s64(__p0) __extension__ ({ \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_s16(__p0) __extension__ ({ \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld3q_dup_s16(__p0) __extension__ ({ \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_u8(__p0) __extension__ ({ \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld3_dup_u8(__p0) __extension__ ({ \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_u32(__p0) __extension__ ({ \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld3_dup_u32(__p0) __extension__ ({ \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_dup_u64(__p0) __extension__ ({ \
-  uint64x1x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_u16(__p0) __extension__ ({ \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld3_dup_u16(__p0) __extension__ ({ \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_s8(__p0) __extension__ ({ \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld3_dup_s8(__p0) __extension__ ({ \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_f32(__p0) __extension__ ({ \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld3_dup_f32(__p0) __extension__ ({ \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_s32(__p0) __extension__ ({ \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld3_dup_s32(__p0) __extension__ ({ \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_dup_s64(__p0) __extension__ ({ \
-  int64x1x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_s16(__p0) __extension__ ({ \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld3_dup_s16(__p0) __extension__ ({ \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \
-  __ret; \
-})
-#else
-#define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  poly8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \
-  __ret; \
-})
-#else
-#define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  poly16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \
-  __ret; \
-})
-#else
-#define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  poly16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \
-  __ret; \
-})
-#else
-#define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  uint32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \
-  __ret; \
-})
-#else
-#define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  uint16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 41); \
-  __ret; \
-})
-#else
-#define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  float32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 34); \
-  __ret; \
-})
-#else
-#define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  int32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 33); \
-  __ret; \
-})
-#else
-#define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  int16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \
-  __ret; \
-})
-#else
-#define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  uint8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \
-  __ret; \
-})
-#else
-#define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  uint32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \
-  __ret; \
-})
-#else
-#define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  uint16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \
-  __ret; \
-})
-#else
-#define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  int8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 9); \
-  __ret; \
-})
-#else
-#define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  float32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 2); \
-  __ret; \
-})
-#else
-#define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  int32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 1); \
-  __ret; \
-})
-#else
-#define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  int16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_p8(__p0) __extension__ ({ \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld4_p8(__p0) __extension__ ({ \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_p16(__p0) __extension__ ({ \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld4_p16(__p0) __extension__ ({ \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_p8(__p0) __extension__ ({ \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld4q_p8(__p0) __extension__ ({ \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_p16(__p0) __extension__ ({ \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld4q_p16(__p0) __extension__ ({ \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_u8(__p0) __extension__ ({ \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld4q_u8(__p0) __extension__ ({ \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_u32(__p0) __extension__ ({ \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld4q_u32(__p0) __extension__ ({ \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_u16(__p0) __extension__ ({ \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld4q_u16(__p0) __extension__ ({ \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_s8(__p0) __extension__ ({ \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld4q_s8(__p0) __extension__ ({ \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_f32(__p0) __extension__ ({ \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld4q_f32(__p0) __extension__ ({ \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_s32(__p0) __extension__ ({ \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld4q_s32(__p0) __extension__ ({ \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_s16(__p0) __extension__ ({ \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld4q_s16(__p0) __extension__ ({ \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_u8(__p0) __extension__ ({ \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld4_u8(__p0) __extension__ ({ \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_u32(__p0) __extension__ ({ \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld4_u32(__p0) __extension__ ({ \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_u64(__p0) __extension__ ({ \
-  uint64x1x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4_u16(__p0) __extension__ ({ \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld4_u16(__p0) __extension__ ({ \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_s8(__p0) __extension__ ({ \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld4_s8(__p0) __extension__ ({ \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_f32(__p0) __extension__ ({ \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld4_f32(__p0) __extension__ ({ \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_s32(__p0) __extension__ ({ \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld4_s32(__p0) __extension__ ({ \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_s64(__p0) __extension__ ({ \
-  int64x1x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4_s16(__p0) __extension__ ({ \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld4_s16(__p0) __extension__ ({ \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_p8(__p0) __extension__ ({ \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld4_dup_p8(__p0) __extension__ ({ \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_p16(__p0) __extension__ ({ \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld4_dup_p16(__p0) __extension__ ({ \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_p8(__p0) __extension__ ({ \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld4q_dup_p8(__p0) __extension__ ({ \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_p16(__p0) __extension__ ({ \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld4q_dup_p16(__p0) __extension__ ({ \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_u8(__p0) __extension__ ({ \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld4q_dup_u8(__p0) __extension__ ({ \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_u32(__p0) __extension__ ({ \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld4q_dup_u32(__p0) __extension__ ({ \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_u64(__p0) __extension__ ({ \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld4q_dup_u64(__p0) __extension__ ({ \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_u16(__p0) __extension__ ({ \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld4q_dup_u16(__p0) __extension__ ({ \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_s8(__p0) __extension__ ({ \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld4q_dup_s8(__p0) __extension__ ({ \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_f32(__p0) __extension__ ({ \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld4q_dup_f32(__p0) __extension__ ({ \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_s32(__p0) __extension__ ({ \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld4q_dup_s32(__p0) __extension__ ({ \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_s64(__p0) __extension__ ({ \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld4q_dup_s64(__p0) __extension__ ({ \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_s16(__p0) __extension__ ({ \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld4q_dup_s16(__p0) __extension__ ({ \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_u8(__p0) __extension__ ({ \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld4_dup_u8(__p0) __extension__ ({ \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_u32(__p0) __extension__ ({ \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld4_dup_u32(__p0) __extension__ ({ \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_dup_u64(__p0) __extension__ ({ \
-  uint64x1x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_u16(__p0) __extension__ ({ \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld4_dup_u16(__p0) __extension__ ({ \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_s8(__p0) __extension__ ({ \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld4_dup_s8(__p0) __extension__ ({ \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_f32(__p0) __extension__ ({ \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld4_dup_f32(__p0) __extension__ ({ \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_s32(__p0) __extension__ ({ \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld4_dup_s32(__p0) __extension__ ({ \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_dup_s64(__p0) __extension__ ({ \
-  int64x1x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_s16(__p0) __extension__ ({ \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld4_dup_s16(__p0) __extension__ ({ \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \
-  __ret; \
-})
-#else
-#define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  poly8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \
-  __ret; \
-})
-#else
-#define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  poly16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \
-  __ret; \
-})
-#else
-#define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  poly16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \
-  __ret; \
-})
-#else
-#define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  uint32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \
-  __ret; \
-})
-#else
-#define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  uint16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 41); \
-  __ret; \
-})
-#else
-#define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  float32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 34); \
-  __ret; \
-})
-#else
-#define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  int32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 33); \
-  __ret; \
-})
-#else
-#define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  int16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \
-  __ret; \
-})
-#else
-#define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  uint8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \
-  __ret; \
-})
-#else
-#define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  uint32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \
-  __ret; \
-})
-#else
-#define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  uint16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \
-  __ret; \
-})
-#else
-#define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  int8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 9); \
-  __ret; \
-})
-#else
-#define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  float32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 2); \
-  __ret; \
-})
-#else
-#define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  int32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 1); \
-  __ret; \
-})
-#else
-#define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  int16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlaq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint16x8_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlaq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlaq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x8_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlaq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint32x2_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmla_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint16x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmla_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmla_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmla_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmla_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 + __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __ret;
-  __ret = __p0 + __p1 * (float32x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 + __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + __p1 * (int32x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 + __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint32x2_t __ret;
-  __ret = __p0 + __p1 * (uint32x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 + __rev1 * (uint32x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint16x4_t __ret;
-  __ret = __p0 + __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 + __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __ret;
-  __ret = __p0 + __p1 * (float32x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 + __rev1 * (float32x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int32x2_t __ret;
-  __ret = __p0 + __p1 * (int32x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 + __rev1 * (int32x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int16x4_t __ret;
-  __ret = __p0 + __p1 * (int16x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 + __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlsq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint16x8_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlsq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlsq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlsq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x8_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlsq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint32x2_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmls_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint16x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmls_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmls_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmls_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 - __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 - __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 - __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 - __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __ret;
-  __ret = __p0 - __p1 * (float32x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 - __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 - __p1 * (int32x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 - __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 - __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 - __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint32x2_t __ret;
-  __ret = __p0 - __p1 * (uint32x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 - __rev1 * (uint32x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint16x4_t __ret;
-  __ret = __p0 - __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 - __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __ret;
-  __ret = __p0 - __p1 * (float32x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 - __rev1 * (float32x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int32x2_t __ret;
-  __ret = __p0 - __p1 * (int32x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 - __rev1 * (int32x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int16x4_t __ret;
-  __ret = __p0 - __p1 * (int16x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 - __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vmov_n_p8(poly8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly8x8_t vmov_n_p8(poly8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vmov_n_p16(poly16_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly16x4_t vmov_n_p16(poly16_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vmovq_n_p8(poly8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly8x16_t vmovq_n_p8(poly8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vmovq_n_p16(poly16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly16x8_t vmovq_n_p16(poly16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmovq_n_u8(uint8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint8x16_t vmovq_n_u8(uint8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmovq_n_u32(uint32_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint32x4_t vmovq_n_u32(uint32_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmovq_n_u64(uint64_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai uint64x2_t vmovq_n_u64(uint64_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmovq_n_u16(uint16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint16x8_t vmovq_n_u16(uint16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmovq_n_s8(int8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int8x16_t vmovq_n_s8(int8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmovq_n_f32(float32_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai float32x4_t vmovq_n_f32(float32_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmovq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vmovq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmovq_n_s32(int32_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int32x4_t vmovq_n_s32(int32_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmovq_n_s64(int64_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai int64x2_t vmovq_n_s64(int64_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmovq_n_s16(int16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int16x8_t vmovq_n_s16(int16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmov_n_u8(uint8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint8x8_t vmov_n_u8(uint8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmov_n_u32(uint32_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai uint32x2_t vmov_n_u32(uint32_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vmov_n_u64(uint64_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmov_n_u16(uint16_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint16x4_t vmov_n_u16(uint16_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmov_n_s8(int8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int8x8_t vmov_n_s8(int8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmov_n_f32(float32_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai float32x2_t vmov_n_f32(float32_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmov_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vmov_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmov_n_s32(int32_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai int32x2_t vmov_n_s32(int32_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vmov_n_s64(int64_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmov_n_s16(int16_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int16x4_t vmov_n_s16(int16_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmovl_u8(uint8x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vmovl_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vmovl_u8(uint8x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmovl_u32(uint32x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmovl_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vmovl_u32(uint32x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmovl_u16(uint16x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmovl_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vmovl_u16(uint16x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmovl_s8(int8x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vmovl_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vmovl_s8(int8x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmovl_s32(int32x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vmovl_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vmovl_s32(int32x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmovl_s16(int16x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vmovl_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vmovl_s16(int16x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmovn_u32(uint32x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vmovn_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vmovn_u32(uint32x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmovn_u64(uint64x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vmovn_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vmovn_u64(uint64x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmovn_u16(uint16x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vmovn_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vmovn_u16(uint16x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmovn_s32(int32x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vmovn_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vmovn_s32(int32x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmovn_s64(int64x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vmovn_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vmovn_s64(int64x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmovn_s16(int16x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vmovn_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vmovn_s16(int16x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__p0, (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__p0, (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmulq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmulq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmulq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmulq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmulq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmul_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmul_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmul_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmul_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmul_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 * (uint32x4_t) {__p1, __p1, __p1, __p1};
-  return __ret;
-}
-#else
-__ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 * (uint32x4_t) {__p1, __p1, __p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
-  return __ret;
-}
-#else
-__ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) {
-  float32x4_t __ret;
-  __ret = __p0 * (float32x4_t) {__p1, __p1, __p1, __p1};
-  return __ret;
-}
-#else
-__ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 * (float32x4_t) {__p1, __p1, __p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 * (int32x4_t) {__p1, __p1, __p1, __p1};
-  return __ret;
-}
-#else
-__ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 * (int32x4_t) {__p1, __p1, __p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
-  return __ret;
-}
-#else
-__ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 * (uint32x2_t) {__p1, __p1};
-  return __ret;
-}
-#else
-__ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 * (uint32x2_t) {__p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 * (uint16x4_t) {__p1, __p1, __p1, __p1};
-  return __ret;
-}
-#else
-__ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 * (uint16x4_t) {__p1, __p1, __p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) {
-  float32x2_t __ret;
-  __ret = __p0 * (float32x2_t) {__p1, __p1};
-  return __ret;
-}
-#else
-__ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 * (float32x2_t) {__p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 * (int32x2_t) {__p1, __p1};
-  return __ret;
-}
-#else
-__ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 * (int32x2_t) {__p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 * (int16x4_t) {__p1, __p1, __p1, __p1};
-  return __ret;
-}
-#else
-__ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 * (int16x4_t) {__p1, __p1, __p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37);
-  return __ret;
-}
-#else
-__ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 37);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai poly16x8_t __noswap_vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vmull_s8(int8x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vmull_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vmull_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = vmull_u32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmull_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __noswap_vmull_u32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = vmull_u16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmull_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __noswap_vmull_u16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = vmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint32x2_t) {__p1, __p1}, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(uint32x2_t) {__p1, __p1}, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint32x2_t) {__p1, __p1}, 51);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint16x4_t) {__p1, __p1, __p1, __p1}, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(uint16x4_t) {__p1, __p1, __p1, __p1}, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint16x4_t) {__p1, __p1, __p1, __p1}, 50);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vmull_n_s32(int32x2_t __p0, int32_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vmull_n_s16(int16x4_t __p0, int16_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vmvn_p8(poly8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai poly8x8_t vmvn_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vmvnq_p8(poly8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai poly8x16_t vmvnq_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmvnq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai uint8x16_t vmvnq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmvnq_u32(uint32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai uint32x4_t vmvnq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmvnq_u16(uint16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai uint16x8_t vmvnq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmvnq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai int8x16_t vmvnq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmvnq_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai int32x4_t vmvnq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmvnq_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai int16x8_t vmvnq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmvn_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai uint8x8_t vmvn_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmvn_u32(uint32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai uint32x2_t vmvn_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmvn_u16(uint16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai uint16x4_t vmvn_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmvn_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai int8x8_t vmvn_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmvn_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai int32x2_t vmvn_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmvn_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai int16x4_t vmvn_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vnegq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int8x16_t vnegq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vnegq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai float32x4_t vnegq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vnegq_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int32x4_t vnegq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vnegq_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int16x8_t vnegq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vneg_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int8x8_t vneg_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vneg_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai float32x2_t vneg_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vneg_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int32x2_t vneg_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vneg_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int16x4_t vneg_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vorn_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vorn_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vorr_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vorr_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#else
-__ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 19);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#else
-__ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) {
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vpaddlq_s8(int8x16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vpaddlq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vpaddlq_s32(int32x4_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vpaddlq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vpaddlq_s16(int16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vpaddlq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vpaddl_u8(uint8x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vpaddl_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x1_t vpaddl_u32(uint32x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#else
-__ai uint64x1_t vpaddl_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 19);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vpaddl_u16(uint16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vpaddl_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vpaddl_s8(int8x8_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vpaddl_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x1_t vpaddl_s32(int32x2_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-#else
-__ai int64x1_t vpaddl_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vpaddl_s16(int16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vpaddl_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqabsq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqabsq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqabsq_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqabsq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqabsq_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqabsq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqabs_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqabs_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqabs_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqabs_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqabs_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqabs_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vqadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vqadd_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = vqdmlal_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vqdmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmlal_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqdmlal_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vqdmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmlal_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = vqdmlsl_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vqdmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqdmlsl_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vqdmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vqdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = vqdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __noswap_vqdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = vqdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __noswap_vqdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = vqdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __noswap_vqdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = vqdmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqdmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vqdmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqdmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vqmovn_u32(uint32x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vqmovn_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vqmovn_u32(uint32x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vqmovn_u64(uint64x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vqmovn_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vqmovn_u64(uint64x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqmovn_u16(uint16x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqmovn_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vqmovn_u16(uint16x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqmovn_s32(int32x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqmovn_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vqmovn_s32(int32x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqmovn_s64(int64x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqmovn_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vqmovn_s64(int64x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqmovn_s16(int16x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqmovn_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vqmovn_s16(int16x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vqmovun_s32(int32x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vqmovun_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vqmovun_s32(int32x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vqmovun_s64(int64x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vqmovun_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vqmovun_s64(int64x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqmovun_s16(int16x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqmovun_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vqmovun_s16(int16x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqnegq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqnegq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqnegq_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqnegq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqnegq_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqnegq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqneg_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqneg_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqneg_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqneg_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqneg_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqneg_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vqrdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqrdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = vqrdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __noswap_vqrdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = vqrdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __noswap_vqrdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = vqrdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __noswap_vqrdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vqrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vqrshl_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vqshl_u64(uint64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vqshl_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#else
-#define vqshlq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vqshlq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vqshlq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vqshlq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 32); \
-  __ret; \
-})
-#else
-#define vqshlq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define vqshlq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vqshlq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vqshlq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshl_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vqshl_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshl_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vqshl_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vqshl_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vqshl_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vqshl_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshl_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vqshl_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshl_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vqshl_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vqshl_n_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vqshl_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vqshl_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshluq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#else
-#define vqshluq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshluq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vqshluq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshluq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vqshluq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshluq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vqshluq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlu_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vqshlu_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlu_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vqshlu_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vqshlu_n_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vqshlu_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vqshlu_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vqshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vqshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vqshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vqshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vqshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vqshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vqshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vqshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vqshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vqsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vqsub_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrecpeq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrecpeq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vrecpe_u32(uint32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vrecpe_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrecpe_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrecpe_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vrev16_p8(poly8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
-  return __ret;
-}
-#else
-__ai poly8x8_t vrev16_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vrev16q_p8(poly8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
-  return __ret;
-}
-#else
-__ai poly8x16_t vrev16q_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrev16q_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
-  return __ret;
-}
-#else
-__ai uint8x16_t vrev16q_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrev16q_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
-  return __ret;
-}
-#else
-__ai int8x16_t vrev16q_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrev16_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrev16_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrev16_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
-  return __ret;
-}
-#else
-__ai int8x8_t vrev16_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vrev32_p8(poly8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vrev32_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vrev32_p16(poly16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
-  return __ret;
-}
-#else
-__ai poly16x4_t vrev32_p16(poly16x4_t __p0) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vrev32q_p8(poly8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
-  return __ret;
-}
-#else
-__ai poly8x16_t vrev32q_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vrev32q_p16(poly16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
-  return __ret;
-}
-#else
-__ai poly16x8_t vrev32q_p16(poly16x8_t __p0) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrev32q_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
-  return __ret;
-}
-#else
-__ai uint8x16_t vrev32q_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vrev32q_u16(uint16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
-  return __ret;
-}
-#else
-__ai uint16x8_t vrev32q_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrev32q_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
-  return __ret;
-}
-#else
-__ai int8x16_t vrev32q_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vrev32q_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
-  return __ret;
-}
-#else
-__ai int16x8_t vrev32q_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrev32_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrev32_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vrev32_u16(uint16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
-  return __ret;
-}
-#else
-__ai uint16x4_t vrev32_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrev32_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai int8x8_t vrev32_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vrev32_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
-  return __ret;
-}
-#else
-__ai int16x4_t vrev32_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vrev64_p8(poly8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai poly8x8_t vrev64_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vrev64_p16(poly16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai poly16x4_t vrev64_p16(poly16x4_t __p0) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vrev64q_p8(poly8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
-  return __ret;
-}
-#else
-__ai poly8x16_t vrev64q_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vrev64q_p16(poly16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai poly16x8_t vrev64q_p16(poly16x8_t __p0) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrev64q_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
-  return __ret;
-}
-#else
-__ai uint8x16_t vrev64q_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vrev64q_u32(uint32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vrev64q_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vrev64q_u16(uint16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai uint16x8_t vrev64q_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrev64q_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
-  return __ret;
-}
-#else
-__ai int8x16_t vrev64q_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrev64q_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
-  return __ret;
-}
-#else
-__ai float32x4_t vrev64q_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vrev64q_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
-  return __ret;
-}
-#else
-__ai int32x4_t vrev64q_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vrev64q_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai int16x8_t vrev64q_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrev64_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrev64_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vrev64_u32(uint32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0);
-  return __ret;
-}
-#else
-__ai uint32x2_t vrev64_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vrev64_u16(uint16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai uint16x4_t vrev64_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrev64_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vrev64_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrev64_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0);
-  return __ret;
-}
-#else
-__ai float32x2_t vrev64_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vrev64_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0);
-  return __ret;
-}
-#else
-__ai int32x2_t vrev64_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vrev64_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai int16x4_t vrev64_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vrshl_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#else
-#define vrshrq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vrshrq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vrshrq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vrshrq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 32); \
-  __ret; \
-})
-#else
-#define vrshrq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define vrshrq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vrshrq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vrshrq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshr_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vrshr_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshr_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vrshr_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vrshr_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vrshr_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vrshr_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshr_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vrshr_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshr_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vrshr_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vrshr_n_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vrshr_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vrshr_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrsqrteq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrsqrteq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrsqrte_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrsqrte_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
-  __ret; \
-})
-#else
-#define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
-  __ret; \
-})
-#else
-#define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
-  __ret; \
-})
-#else
-#define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
-  __ret; \
-})
-#else
-#define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
-  __ret; \
-})
-#else
-#define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
-  __ret; \
-})
-#else
-#define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
-  __ret; \
-})
-#else
-#define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
-  __ret; \
-})
-#else
-#define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vrsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
-  __ret; \
-})
-#else
-#define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
-  __ret; \
-})
-#else
-#define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vrsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (int8x16_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#define vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (int8x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#define vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vshl_u64(uint64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vshl_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#else
-#define vshlq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vshlq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vshlq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vshlq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 32); \
-  __ret; \
-})
-#else
-#define vshlq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define vshlq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vshlq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vshlq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshl_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vshl_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshl_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vshl_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vshl_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vshl_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vshl_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshl_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vshl_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshl_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vshl_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vshl_n_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vshl_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vshl_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vshll_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshll_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vshll_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshll_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vshll_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshll_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vshll_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshll_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vshll_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshll_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define vshll_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshll_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#else
-#define vshrq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vshrq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vshrq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vshrq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 32); \
-  __ret; \
-})
-#else
-#define vshrq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define vshrq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vshrq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vshrq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshr_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vshr_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshr_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vshr_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vshr_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vshr_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vshr_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshr_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vshr_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshr_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vshr_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vshr_n_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vshr_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vshr_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
-  __ret; \
-})
-#else
-#define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
-  __ret; \
-})
-#else
-#define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
-  __ret; \
-})
-#else
-#define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
-  __ret; \
-})
-#else
-#define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
-  __ret; \
-})
-#else
-#define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
-  __ret; \
-})
-#else
-#define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
-  __ret; \
-})
-#else
-#define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
-  __ret; \
-})
-#else
-#define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
-  __ret; \
-})
-#else
-#define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
-  __ret; \
-})
-#else
-#define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
-  __ret; \
-})
-#else
-#define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
-  __ret; \
-})
-#else
-#define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vsli_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
-  __ret; \
-})
-#else
-#define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
-  __ret; \
-})
-#else
-#define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vsli_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
-  __ret; \
-})
-#else
-#define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
-  __ret; \
-})
-#else
-#define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
-  __ret; \
-})
-#else
-#define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
-  __ret; \
-})
-#else
-#define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
-  __ret; \
-})
-#else
-#define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
-  __ret; \
-})
-#else
-#define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
-  __ret; \
-})
-#else
-#define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
-  __ret; \
-})
-#else
-#define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
-  __ret; \
-})
-#else
-#define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
-  __ret; \
-})
-#else
-#define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
-  __ret; \
-})
-#else
-#define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
-  __ret; \
-})
-#else
-#define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
-  __ret; \
-})
-#else
-#define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
-  __ret; \
-})
-#else
-#define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
-  __ret; \
-})
-#else
-#define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
-  __ret; \
-})
-#else
-#define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
-  __ret; \
-})
-#else
-#define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
-  __ret; \
-})
-#else
-#define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
-  __ret; \
-})
-#else
-#define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
-  __ret; \
-})
-#else
-#define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
-  __ret; \
-})
-#else
-#define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
-  __ret; \
-})
-#else
-#define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vsri_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
-  __ret; \
-})
-#else
-#define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
-  __ret; \
-})
-#else
-#define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vsri_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 4); \
-})
-#else
-#define vst1_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 5); \
-})
-#else
-#define vst1_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 36); \
-})
-#else
-#define vst1q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 37); \
-})
-#else
-#define vst1q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 48); \
-})
-#else
-#define vst1q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 50); \
-})
-#else
-#define vst1q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 51); \
-})
-#else
-#define vst1q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 49); \
-})
-#else
-#define vst1q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 32); \
-})
-#else
-#define vst1q_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 41); \
-})
-#else
-#define vst1q_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 34); \
-})
-#else
-#define vst1q_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 35); \
-})
-#else
-#define vst1q_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 35); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 33); \
-})
-#else
-#define vst1q_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 16); \
-})
-#else
-#define vst1_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 18); \
-})
-#else
-#define vst1_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 18); \
-})
-#endif
-
-#define vst1_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 17); \
-})
-#else
-#define vst1_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 0); \
-})
-#else
-#define vst1_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 9); \
-})
-#else
-#define vst1_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 2); \
-})
-#else
-#define vst1_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 2); \
-})
-#endif
-
-#define vst1_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 1); \
-})
-#else
-#define vst1_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \
-})
-#else
-#define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \
-})
-#else
-#define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \
-})
-#else
-#define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \
-})
-#else
-#define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \
-})
-#else
-#define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \
-})
-#else
-#define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \
-})
-#else
-#define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \
-})
-#else
-#define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \
-})
-#else
-#define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \
-})
-#else
-#define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \
-})
-#else
-#define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \
-})
-#else
-#define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \
-})
-#else
-#define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \
-})
-#else
-#define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \
-})
-#else
-#define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \
-})
-#endif
-
-#define vst1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \
-})
-#else
-#define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \
-})
-#else
-#define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \
-})
-#else
-#define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \
-})
-#else
-#define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \
-})
-#endif
-
-#define vst1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \
-})
-#else
-#define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p8_x2(__p0, __p1) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \
-})
-#else
-#define vst1_p8_x2(__p0, __p1) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  poly8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p16_x2(__p0, __p1) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \
-})
-#else
-#define vst1_p16_x2(__p0, __p1) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  poly16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p8_x2(__p0, __p1) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \
-})
-#else
-#define vst1q_p8_x2(__p0, __p1) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  poly8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p16_x2(__p0, __p1) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \
-})
-#else
-#define vst1q_p16_x2(__p0, __p1) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  poly16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u8_x2(__p0, __p1) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \
-})
-#else
-#define vst1q_u8_x2(__p0, __p1) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  uint8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u32_x2(__p0, __p1) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \
-})
-#else
-#define vst1q_u32_x2(__p0, __p1) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  uint32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u64_x2(__p0, __p1) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \
-})
-#else
-#define vst1q_u64_x2(__p0, __p1) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  uint64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u16_x2(__p0, __p1) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \
-})
-#else
-#define vst1q_u16_x2(__p0, __p1) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  uint16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s8_x2(__p0, __p1) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \
-})
-#else
-#define vst1q_s8_x2(__p0, __p1) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  int8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f32_x2(__p0, __p1) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 41); \
-})
-#else
-#define vst1q_f32_x2(__p0, __p1) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  float32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s32_x2(__p0, __p1) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 34); \
-})
-#else
-#define vst1q_s32_x2(__p0, __p1) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  int32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s64_x2(__p0, __p1) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 35); \
-})
-#else
-#define vst1q_s64_x2(__p0, __p1) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  int64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 35); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s16_x2(__p0, __p1) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 33); \
-})
-#else
-#define vst1q_s16_x2(__p0, __p1) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  int16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u8_x2(__p0, __p1) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \
-})
-#else
-#define vst1_u8_x2(__p0, __p1) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  uint8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u32_x2(__p0, __p1) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \
-})
-#else
-#define vst1_u32_x2(__p0, __p1) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  uint32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \
-})
-#endif
-
-#define vst1_u64_x2(__p0, __p1) __extension__ ({ \
-  uint64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u16_x2(__p0, __p1) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \
-})
-#else
-#define vst1_u16_x2(__p0, __p1) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  uint16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s8_x2(__p0, __p1) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \
-})
-#else
-#define vst1_s8_x2(__p0, __p1) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  int8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f32_x2(__p0, __p1) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 9); \
-})
-#else
-#define vst1_f32_x2(__p0, __p1) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  float32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s32_x2(__p0, __p1) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 2); \
-})
-#else
-#define vst1_s32_x2(__p0, __p1) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  int32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 2); \
-})
-#endif
-
-#define vst1_s64_x2(__p0, __p1) __extension__ ({ \
-  int64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s16_x2(__p0, __p1) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 1); \
-})
-#else
-#define vst1_s16_x2(__p0, __p1) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  int16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p8_x3(__p0, __p1) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \
-})
-#else
-#define vst1_p8_x3(__p0, __p1) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  poly8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p16_x3(__p0, __p1) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \
-})
-#else
-#define vst1_p16_x3(__p0, __p1) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  poly16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p8_x3(__p0, __p1) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \
-})
-#else
-#define vst1q_p8_x3(__p0, __p1) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  poly8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p16_x3(__p0, __p1) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \
-})
-#else
-#define vst1q_p16_x3(__p0, __p1) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  poly16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u8_x3(__p0, __p1) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \
-})
-#else
-#define vst1q_u8_x3(__p0, __p1) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  uint8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u32_x3(__p0, __p1) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \
-})
-#else
-#define vst1q_u32_x3(__p0, __p1) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  uint32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u64_x3(__p0, __p1) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \
-})
-#else
-#define vst1q_u64_x3(__p0, __p1) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  uint64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u16_x3(__p0, __p1) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \
-})
-#else
-#define vst1q_u16_x3(__p0, __p1) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  uint16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s8_x3(__p0, __p1) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \
-})
-#else
-#define vst1q_s8_x3(__p0, __p1) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  int8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f32_x3(__p0, __p1) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 41); \
-})
-#else
-#define vst1q_f32_x3(__p0, __p1) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  float32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s32_x3(__p0, __p1) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 34); \
-})
-#else
-#define vst1q_s32_x3(__p0, __p1) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  int32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s64_x3(__p0, __p1) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 35); \
-})
-#else
-#define vst1q_s64_x3(__p0, __p1) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  int64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 35); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s16_x3(__p0, __p1) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 33); \
-})
-#else
-#define vst1q_s16_x3(__p0, __p1) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  int16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u8_x3(__p0, __p1) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \
-})
-#else
-#define vst1_u8_x3(__p0, __p1) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  uint8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u32_x3(__p0, __p1) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \
-})
-#else
-#define vst1_u32_x3(__p0, __p1) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  uint32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \
-})
-#endif
-
-#define vst1_u64_x3(__p0, __p1) __extension__ ({ \
-  uint64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u16_x3(__p0, __p1) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \
-})
-#else
-#define vst1_u16_x3(__p0, __p1) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  uint16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s8_x3(__p0, __p1) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \
-})
-#else
-#define vst1_s8_x3(__p0, __p1) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  int8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f32_x3(__p0, __p1) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 9); \
-})
-#else
-#define vst1_f32_x3(__p0, __p1) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  float32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s32_x3(__p0, __p1) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 2); \
-})
-#else
-#define vst1_s32_x3(__p0, __p1) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  int32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 2); \
-})
-#endif
-
-#define vst1_s64_x3(__p0, __p1) __extension__ ({ \
-  int64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s16_x3(__p0, __p1) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 1); \
-})
-#else
-#define vst1_s16_x3(__p0, __p1) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  int16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p8_x4(__p0, __p1) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \
-})
-#else
-#define vst1_p8_x4(__p0, __p1) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  poly8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p16_x4(__p0, __p1) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \
-})
-#else
-#define vst1_p16_x4(__p0, __p1) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  poly16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p8_x4(__p0, __p1) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \
-})
-#else
-#define vst1q_p8_x4(__p0, __p1) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  poly8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p16_x4(__p0, __p1) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \
-})
-#else
-#define vst1q_p16_x4(__p0, __p1) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  poly16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u8_x4(__p0, __p1) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \
-})
-#else
-#define vst1q_u8_x4(__p0, __p1) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  uint8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u32_x4(__p0, __p1) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \
-})
-#else
-#define vst1q_u32_x4(__p0, __p1) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  uint32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u64_x4(__p0, __p1) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \
-})
-#else
-#define vst1q_u64_x4(__p0, __p1) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  uint64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u16_x4(__p0, __p1) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \
-})
-#else
-#define vst1q_u16_x4(__p0, __p1) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  uint16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s8_x4(__p0, __p1) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \
-})
-#else
-#define vst1q_s8_x4(__p0, __p1) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  int8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f32_x4(__p0, __p1) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 41); \
-})
-#else
-#define vst1q_f32_x4(__p0, __p1) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  float32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s32_x4(__p0, __p1) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 34); \
-})
-#else
-#define vst1q_s32_x4(__p0, __p1) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  int32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s64_x4(__p0, __p1) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 35); \
-})
-#else
-#define vst1q_s64_x4(__p0, __p1) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  int64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 35); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s16_x4(__p0, __p1) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 33); \
-})
-#else
-#define vst1q_s16_x4(__p0, __p1) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  int16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u8_x4(__p0, __p1) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \
-})
-#else
-#define vst1_u8_x4(__p0, __p1) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  uint8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u32_x4(__p0, __p1) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \
-})
-#else
-#define vst1_u32_x4(__p0, __p1) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  uint32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \
-})
-#endif
-
-#define vst1_u64_x4(__p0, __p1) __extension__ ({ \
-  uint64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u16_x4(__p0, __p1) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \
-})
-#else
-#define vst1_u16_x4(__p0, __p1) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  uint16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s8_x4(__p0, __p1) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \
-})
-#else
-#define vst1_s8_x4(__p0, __p1) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  int8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f32_x4(__p0, __p1) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 9); \
-})
-#else
-#define vst1_f32_x4(__p0, __p1) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  float32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s32_x4(__p0, __p1) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 2); \
-})
-#else
-#define vst1_s32_x4(__p0, __p1) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  int32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 2); \
-})
-#endif
-
-#define vst1_s64_x4(__p0, __p1) __extension__ ({ \
-  int64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s16_x4(__p0, __p1) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 1); \
-})
-#else
-#define vst1_s16_x4(__p0, __p1) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  int16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_p8(__p0, __p1) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \
-})
-#else
-#define vst2_p8(__p0, __p1) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  poly8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_p16(__p0, __p1) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \
-})
-#else
-#define vst2_p16(__p0, __p1) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  poly16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \
-})
-#else
-#define vst2q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  poly8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \
-})
-#else
-#define vst2q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  poly16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \
-})
-#else
-#define vst2q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  uint8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \
-})
-#else
-#define vst2q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  uint32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \
-})
-#else
-#define vst2q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  uint16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_s8(__p0, __p1) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \
-})
-#else
-#define vst2q_s8(__p0, __p1) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  int8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_f32(__p0, __p1) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 41); \
-})
-#else
-#define vst2q_f32(__p0, __p1) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  float32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_s32(__p0, __p1) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 34); \
-})
-#else
-#define vst2q_s32(__p0, __p1) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  int32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_s16(__p0, __p1) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 33); \
-})
-#else
-#define vst2q_s16(__p0, __p1) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  int16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_u8(__p0, __p1) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \
-})
-#else
-#define vst2_u8(__p0, __p1) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  uint8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_u32(__p0, __p1) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \
-})
-#else
-#define vst2_u32(__p0, __p1) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  uint32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \
-})
-#endif
-
-#define vst2_u64(__p0, __p1) __extension__ ({ \
-  uint64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst2_u16(__p0, __p1) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \
-})
-#else
-#define vst2_u16(__p0, __p1) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  uint16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_s8(__p0, __p1) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \
-})
-#else
-#define vst2_s8(__p0, __p1) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  int8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_f32(__p0, __p1) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 9); \
-})
-#else
-#define vst2_f32(__p0, __p1) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  float32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_s32(__p0, __p1) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 2); \
-})
-#else
-#define vst2_s32(__p0, __p1) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  int32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 2); \
-})
-#endif
-
-#define vst2_s64(__p0, __p1) __extension__ ({ \
-  int64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst2_s16(__p0, __p1) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 1); \
-})
-#else
-#define vst2_s16(__p0, __p1) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  int16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \
-})
-#else
-#define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  poly8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \
-})
-#else
-#define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  poly16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \
-})
-#else
-#define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  poly16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \
-})
-#else
-#define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  uint32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \
-})
-#else
-#define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  uint16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 41); \
-})
-#else
-#define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  float32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 34); \
-})
-#else
-#define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  int32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 33); \
-})
-#else
-#define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  int16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \
-})
-#else
-#define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  uint8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \
-})
-#else
-#define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  uint32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \
-})
-#else
-#define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  uint16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \
-})
-#else
-#define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  int8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 9); \
-})
-#else
-#define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  float32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 2); \
-})
-#else
-#define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  int32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 2); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 1); \
-})
-#else
-#define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  int16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_p8(__p0, __p1) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \
-})
-#else
-#define vst3_p8(__p0, __p1) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  poly8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_p16(__p0, __p1) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \
-})
-#else
-#define vst3_p16(__p0, __p1) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  poly16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \
-})
-#else
-#define vst3q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  poly8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \
-})
-#else
-#define vst3q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  poly16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \
-})
-#else
-#define vst3q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  uint8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \
-})
-#else
-#define vst3q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  uint32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \
-})
-#else
-#define vst3q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  uint16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_s8(__p0, __p1) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \
-})
-#else
-#define vst3q_s8(__p0, __p1) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  int8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_f32(__p0, __p1) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 41); \
-})
-#else
-#define vst3q_f32(__p0, __p1) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  float32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_s32(__p0, __p1) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 34); \
-})
-#else
-#define vst3q_s32(__p0, __p1) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  int32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_s16(__p0, __p1) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 33); \
-})
-#else
-#define vst3q_s16(__p0, __p1) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  int16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_u8(__p0, __p1) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \
-})
-#else
-#define vst3_u8(__p0, __p1) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  uint8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_u32(__p0, __p1) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \
-})
-#else
-#define vst3_u32(__p0, __p1) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  uint32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \
-})
-#endif
-
-#define vst3_u64(__p0, __p1) __extension__ ({ \
-  uint64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst3_u16(__p0, __p1) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \
-})
-#else
-#define vst3_u16(__p0, __p1) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  uint16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_s8(__p0, __p1) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \
-})
-#else
-#define vst3_s8(__p0, __p1) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  int8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_f32(__p0, __p1) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 9); \
-})
-#else
-#define vst3_f32(__p0, __p1) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  float32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_s32(__p0, __p1) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 2); \
-})
-#else
-#define vst3_s32(__p0, __p1) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  int32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 2); \
-})
-#endif
-
-#define vst3_s64(__p0, __p1) __extension__ ({ \
-  int64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst3_s16(__p0, __p1) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 1); \
-})
-#else
-#define vst3_s16(__p0, __p1) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  int16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \
-})
-#else
-#define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  poly8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \
-})
-#else
-#define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  poly16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \
-})
-#else
-#define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  poly16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \
-})
-#else
-#define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  uint32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \
-})
-#else
-#define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  uint16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 41); \
-})
-#else
-#define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  float32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 34); \
-})
-#else
-#define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  int32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 33); \
-})
-#else
-#define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  int16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \
-})
-#else
-#define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  uint8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \
-})
-#else
-#define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  uint32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \
-})
-#else
-#define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  uint16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \
-})
-#else
-#define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  int8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 9); \
-})
-#else
-#define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  float32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 2); \
-})
-#else
-#define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  int32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 2); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 1); \
-})
-#else
-#define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  int16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_p8(__p0, __p1) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \
-})
-#else
-#define vst4_p8(__p0, __p1) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  poly8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_p16(__p0, __p1) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \
-})
-#else
-#define vst4_p16(__p0, __p1) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  poly16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \
-})
-#else
-#define vst4q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  poly8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \
-})
-#else
-#define vst4q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  poly16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \
-})
-#else
-#define vst4q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  uint8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \
-})
-#else
-#define vst4q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  uint32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \
-})
-#else
-#define vst4q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  uint16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_s8(__p0, __p1) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \
-})
-#else
-#define vst4q_s8(__p0, __p1) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  int8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_f32(__p0, __p1) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 41); \
-})
-#else
-#define vst4q_f32(__p0, __p1) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  float32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_s32(__p0, __p1) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 34); \
-})
-#else
-#define vst4q_s32(__p0, __p1) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  int32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_s16(__p0, __p1) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 33); \
-})
-#else
-#define vst4q_s16(__p0, __p1) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  int16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_u8(__p0, __p1) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \
-})
-#else
-#define vst4_u8(__p0, __p1) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  uint8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_u32(__p0, __p1) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \
-})
-#else
-#define vst4_u32(__p0, __p1) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  uint32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \
-})
-#endif
-
-#define vst4_u64(__p0, __p1) __extension__ ({ \
-  uint64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst4_u16(__p0, __p1) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \
-})
-#else
-#define vst4_u16(__p0, __p1) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  uint16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_s8(__p0, __p1) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \
-})
-#else
-#define vst4_s8(__p0, __p1) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  int8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_f32(__p0, __p1) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 9); \
-})
-#else
-#define vst4_f32(__p0, __p1) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  float32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_s32(__p0, __p1) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 2); \
-})
-#else
-#define vst4_s32(__p0, __p1) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  int32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 2); \
-})
-#endif
-
-#define vst4_s64(__p0, __p1) __extension__ ({ \
-  int64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst4_s16(__p0, __p1) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 1); \
-})
-#else
-#define vst4_s16(__p0, __p1) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  int16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \
-})
-#else
-#define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  poly8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \
-})
-#else
-#define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  poly16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \
-})
-#else
-#define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  poly16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \
-})
-#else
-#define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  uint32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \
-})
-#else
-#define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  uint16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 41); \
-})
-#else
-#define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  float32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 34); \
-})
-#else
-#define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  int32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 33); \
-})
-#else
-#define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  int16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \
-})
-#else
-#define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  uint8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \
-})
-#else
-#define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  uint32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \
-})
-#else
-#define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  uint16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \
-})
-#else
-#define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  int8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 9); \
-})
-#else
-#define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  float32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 2); \
-})
-#else
-#define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  int32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 2); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 1); \
-})
-#else
-#define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  int16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vsub_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = vmovl_u8(__p0) - vmovl_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmovl_u8(__rev0) - __noswap_vmovl_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmovl_u32(__p0) - vmovl_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmovl_u32(__rev0) - __noswap_vmovl_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmovl_u16(__p0) - vmovl_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmovl_u16(__rev0) - __noswap_vmovl_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = vmovl_s8(__p0) - vmovl_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmovl_s8(__rev0) - __noswap_vmovl_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = vmovl_s32(__p0) - vmovl_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmovl_s32(__rev0) - __noswap_vmovl_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = vmovl_s16(__p0) - vmovl_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmovl_s16(__rev0) - __noswap_vmovl_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 - vmovl_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 - __noswap_vmovl_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 - vmovl_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 - __noswap_vmovl_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 - vmovl_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 - __noswap_vmovl_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 - vmovl_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 - __noswap_vmovl_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 - vmovl_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 - __noswap_vmovl_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 - vmovl_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 - __noswap_vmovl_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) {
-  poly8x8x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) {
-  uint8x8x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) {
-  int8x8x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) {
-  poly8x8x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) {
-  uint8x8x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) {
-  int8x8x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) {
-  poly8x8x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) {
-  uint8x8x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) {
-  int8x8x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5);
-  return __ret;
-}
-#else
-__ai poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37);
-  return __ret;
-}
-#else
-__ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5);
-  return __ret;
-}
-#else
-__ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37);
-  return __ret;
-}
-#else
-__ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5);
-  return __ret;
-}
-#else
-__ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37);
-  return __ret;
-}
-#else
-__ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#if !defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vdupq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vdup_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmovq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vmovq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmov_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vmov_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-__ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-#endif
-#if (__ARM_FP & 2)
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcvt_f16_f32(float32x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vcvt_f16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vcvt_f16_f32(float32x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 8);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvt_f32_f16(float16x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vcvt_f32_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vcvt_f32_f16(float16x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f16(__p0) __extension__ ({ \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vld1q_v(__p0, 40); \
-  __ret; \
-})
-#else
-#define vld1q_f16(__p0) __extension__ ({ \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vld1q_v(__p0, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f16(__p0) __extension__ ({ \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vld1_v(__p0, 8); \
-  __ret; \
-})
-#else
-#define vld1_f16(__p0) __extension__ ({ \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vld1_v(__p0, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_f16(__p0) __extension__ ({ \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vld1q_dup_v(__p0, 40); \
-  __ret; \
-})
-#else
-#define vld1q_dup_f16(__p0) __extension__ ({ \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vld1q_dup_v(__p0, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_f16(__p0) __extension__ ({ \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vld1_dup_v(__p0, 8); \
-  __ret; \
-})
-#else
-#define vld1_dup_f16(__p0) __extension__ ({ \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vld1_dup_v(__p0, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \
-  __ret; \
-})
-#else
-#define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \
-  __ret; \
-})
-#else
-#define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f16_x2(__p0) __extension__ ({ \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld1q_f16_x2(__p0) __extension__ ({ \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f16_x2(__p0) __extension__ ({ \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld1_f16_x2(__p0) __extension__ ({ \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f16_x3(__p0) __extension__ ({ \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld1q_f16_x3(__p0) __extension__ ({ \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f16_x3(__p0) __extension__ ({ \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld1_f16_x3(__p0) __extension__ ({ \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f16_x4(__p0) __extension__ ({ \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld1q_f16_x4(__p0) __extension__ ({ \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f16_x4(__p0) __extension__ ({ \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld1_f16_x4(__p0) __extension__ ({ \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_f16(__p0) __extension__ ({ \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld2q_f16(__p0) __extension__ ({ \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_f16(__p0) __extension__ ({ \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld2_f16(__p0) __extension__ ({ \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_f16(__p0) __extension__ ({ \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld2q_dup_f16(__p0) __extension__ ({ \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_f16(__p0) __extension__ ({ \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld2_dup_f16(__p0) __extension__ ({ \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 40); \
-  __ret; \
-})
-#else
-#define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  float16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 8); \
-  __ret; \
-})
-#else
-#define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  float16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_f16(__p0) __extension__ ({ \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld3q_f16(__p0) __extension__ ({ \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_f16(__p0) __extension__ ({ \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld3_f16(__p0) __extension__ ({ \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_f16(__p0) __extension__ ({ \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld3q_dup_f16(__p0) __extension__ ({ \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_f16(__p0) __extension__ ({ \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld3_dup_f16(__p0) __extension__ ({ \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 40); \
-  __ret; \
-})
-#else
-#define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  float16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 8); \
-  __ret; \
-})
-#else
-#define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  float16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_f16(__p0) __extension__ ({ \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld4q_f16(__p0) __extension__ ({ \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_f16(__p0) __extension__ ({ \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld4_f16(__p0) __extension__ ({ \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_f16(__p0) __extension__ ({ \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld4q_dup_f16(__p0) __extension__ ({ \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_f16(__p0) __extension__ ({ \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld4_dup_f16(__p0) __extension__ ({ \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 40); \
-  __ret; \
-})
-#else
-#define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  float16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 8); \
-  __ret; \
-})
-#else
-#define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  float16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 40); \
-})
-#else
-#define vst1q_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 8); \
-})
-#else
-#define vst1_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \
-})
-#else
-#define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \
-})
-#else
-#define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f16_x2(__p0, __p1) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 40); \
-})
-#else
-#define vst1q_f16_x2(__p0, __p1) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  float16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f16_x2(__p0, __p1) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 8); \
-})
-#else
-#define vst1_f16_x2(__p0, __p1) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  float16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f16_x3(__p0, __p1) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 40); \
-})
-#else
-#define vst1q_f16_x3(__p0, __p1) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  float16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f16_x3(__p0, __p1) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 8); \
-})
-#else
-#define vst1_f16_x3(__p0, __p1) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  float16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f16_x4(__p0, __p1) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 40); \
-})
-#else
-#define vst1q_f16_x4(__p0, __p1) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  float16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f16_x4(__p0, __p1) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 8); \
-})
-#else
-#define vst1_f16_x4(__p0, __p1) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  float16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_f16(__p0, __p1) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 40); \
-})
-#else
-#define vst2q_f16(__p0, __p1) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  float16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_f16(__p0, __p1) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 8); \
-})
-#else
-#define vst2_f16(__p0, __p1) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  float16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 40); \
-})
-#else
-#define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  float16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 8); \
-})
-#else
-#define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  float16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_f16(__p0, __p1) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 40); \
-})
-#else
-#define vst3q_f16(__p0, __p1) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  float16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_f16(__p0, __p1) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 8); \
-})
-#else
-#define vst3_f16(__p0, __p1) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  float16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 40); \
-})
-#else
-#define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  float16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 8); \
-})
-#else
-#define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  float16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_f16(__p0, __p1) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 40); \
-})
-#else
-#define vst4q_f16(__p0, __p1) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  float16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_f16(__p0, __p1) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 8); \
-})
-#else
-#define vst4_f16(__p0, __p1) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  float16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 40); \
-})
-#else
-#define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  float16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 8); \
-})
-#else
-#define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  float16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 8); \
-})
-#endif
-
-#endif
-#if __ARM_ARCH >= 8
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcvta_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcvta_s32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_CRYPTO)
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaesdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaesdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaeseq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaeseq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vaesimcq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaesimcq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vaesimcq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaesimcq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vaesmcq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaesmcq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vaesmcq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaesmcq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32((int8x16_t)__p0, __p1, (int8x16_t)__p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32((int8x16_t)__rev0, __p1, (int8x16_t)__rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint32_t vsha1h_u32(uint32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vsha1h_u32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32((int8x16_t)__p0, __p1, (int8x16_t)__p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32((int8x16_t)__rev0, __p1, (int8x16_t)__rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32((int8x16_t)__p0, __p1, (int8x16_t)__p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32((int8x16_t)__rev0, __p1, (int8x16_t)__rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1su0q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1su1q_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256hq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256hq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256h2q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256h2q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256su0q_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256su1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING)
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrnd_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrnd_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndaq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndaq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrnda_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrnda_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndiq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndiq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrndi_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrndi_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndmq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndmq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrndm_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrndm_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndnq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndnq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrndn_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrndn_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float32_t vrndns_f32(float32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrndns_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndpq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndpq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrndp_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrndp_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndxq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndxq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrndx_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrndx_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrnd_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrnd_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrnd_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndaq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndaq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrnda_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrnda_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrnda_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndmq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndmq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrndm_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrndm_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndm_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndnq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndnq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrndn_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrndn_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndn_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndpq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndpq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrndp_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrndp_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndp_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndxq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndxq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrndx_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrndx_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndx_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN)
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vcvta_s64_f64(float64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vcvta_s64_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcvta_u64_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcvta_u64_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vcvtm_s64_f64(float64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vcvtm_s64_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcvtm_u64_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcvtm_u64_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vcvtn_s64_f64(float64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vcvtn_s64_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcvtn_u64_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcvtn_u64_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vcvtp_s64_f64(float64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vcvtp_s64_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcvtp_u64_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcvtp_u64_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_p64(poly64x1_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_f64(float64x1_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_p8(poly8x8_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_p16(poly16x4_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_u8(uint8x8_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_u32(uint32x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_u64(uint64x1_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_u16(uint16x4_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_s8(int8x8_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_f64(float64x1_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_f32(float32x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_f16(float16x4_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_s32(int32x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_s64(int64x1_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_s16(int16x4_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_p64(poly64x1_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_f64(float64x1_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_p128(poly128_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_p64(poly64x2_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_f64(float64x2_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_p8(poly8x16_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_p64(poly64x2_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_p16(poly16x8_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_u8(uint8x16_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_u32(uint32x4_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_u64(uint64x2_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_u16(uint16x8_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_s8(int8x16_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_f64(float64x2_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_f32(float32x4_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_f16(float16x8_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_s32(int32x4_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_s64(int64x2_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_s16(int16x8_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_p8(poly8x16_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_p128(poly128_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_p16(poly16x8_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_u8(uint8x16_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_u32(uint32x4_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_u64(uint64x2_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_u16(uint16x8_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_s8(int8x16_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_f64(float64x2_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_f32(float32x4_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_f16(float16x8_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_s32(int32x4_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_s64(int64x2_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_s16(int16x8_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_p128(poly128_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_p64(poly64x2_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_f64(float64x2_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_p128(poly128_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_p64(poly64x2_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_f64(float64x2_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_p128(poly128_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_p64(poly64x2_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_f64(float64x2_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_p128(poly128_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_p64(poly64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_p128(poly128_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_p64(poly64x2_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_f64(float64x2_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_p128(poly128_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_p64(poly64x2_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_f64(float64x2_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_p8(poly8x16_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_p128(poly128_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_p64(poly64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_p16(poly16x8_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_u8(uint8x16_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_u32(uint32x4_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_u64(uint64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_u16(uint16x8_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_s8(int8x16_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_f32(float32x4_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_f16(float16x8_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_s32(int32x4_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_s64(int64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_s16(int16x8_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_p128(poly128_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_p64(poly64x2_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_f64(float64x2_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_p128(poly128_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_p64(poly64x2_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_f64(float64x2_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_p128(poly128_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_p64(poly64x2_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_f64(float64x2_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_p128(poly128_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_p64(poly64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_f64(float64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_p128(poly128_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_p64(poly64x2_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_f64(float64x2_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_p64(poly64x1_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_f64(float64x1_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_p64(poly64x1_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_f64(float64x1_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_p64(poly64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_p64(poly64x1_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_f64(float64x1_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_p64(poly64x1_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_f64(float64x1_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_p8(poly8x8_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_p64(poly64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_p16(poly16x4_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_u8(uint8x8_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_u32(uint32x2_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_u64(uint64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_u16(uint16x4_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_s8(int8x8_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_f32(float32x2_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_f16(float16x4_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_s32(int32x2_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_s64(int64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_s16(int16x4_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_p64(poly64x1_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_f64(float64x1_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_p64(poly64x1_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_f64(float64x1_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_p64(poly64x1_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_f64(float64x1_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_p64(poly64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_f64(float64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_p64(poly64x1_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_f64(float64x1_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-#endif
-#if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_DIRECTED_ROUNDING)
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrnd_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndaq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndaq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrnda_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndiq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndiq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrndi_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndmq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndmq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrndm_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndnq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndnq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrndn_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndpq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndpq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrndp_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndxq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndxq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrndx_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#endif
-#if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_NUMERIC_MAXMIN)
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmaxnm_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vminnm_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#endif
-#if defined(__ARM_FEATURE_DOTPROD)
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdotq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x8_t __s2 = __p2; \
-  uint32x4_t __ret; \
-uint8x8_t __reint = __s2; \
-uint32x4_t __reint1 = __builtin_shufflevector(*(uint32x2_t *) &__reint, *(uint32x2_t *) &__reint, __p3, __p3, __p3, __p3); \
-  __ret = vdotq_u32(__s0, __s1, *(uint8x16_t *) &__reint1); \
-  __ret; \
-})
-#else
-#define vdotq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x8_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-uint8x8_t __reint = __rev2; \
-uint32x4_t __reint1 = __builtin_shufflevector(*(uint32x2_t *) &__reint, *(uint32x2_t *) &__reint, __p3, __p3, __p3, __p3); \
-  __ret = __noswap_vdotq_u32(__rev0, __rev1, *(uint8x16_t *) &__reint1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdotq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-int8x8_t __reint = __s2; \
-int32x4_t __reint1 = __builtin_shufflevector(*(uint32x2_t *) &__reint, *(uint32x2_t *) &__reint, __p3, __p3, __p3, __p3); \
-  __ret = vdotq_s32(__s0, __s1, *(int8x16_t *) &__reint1); \
-  __ret; \
-})
-#else
-#define vdotq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-int8x8_t __reint = __rev2; \
-int32x4_t __reint1 = __builtin_shufflevector(*(uint32x2_t *) &__reint, *(uint32x2_t *) &__reint, __p3, __p3, __p3, __p3); \
-  __ret = __noswap_vdotq_s32(__rev0, __rev1, *(int8x16_t *) &__reint1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdot_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __s2 = __p2; \
-  uint32x2_t __ret; \
-uint8x8_t __reint = __s2; \
-uint32x2_t __reint1 = __builtin_shufflevector(*(uint32x2_t *) &__reint, *(uint32x2_t *) &__reint, __p3, __p3); \
-  __ret = vdot_u32(__s0, __s1, *(uint8x8_t *) &__reint1); \
-  __ret; \
-})
-#else
-#define vdot_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __s2 = __p2; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x2_t __ret; \
-uint8x8_t __reint = __rev2; \
-uint32x2_t __reint1 = __builtin_shufflevector(*(uint32x2_t *) &__reint, *(uint32x2_t *) &__reint, __p3, __p3); \
-  __ret = __noswap_vdot_u32(__rev0, __rev1, *(uint8x8_t *) &__reint1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdot_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __s2 = __p2; \
-  int32x2_t __ret; \
-int8x8_t __reint = __s2; \
-int32x2_t __reint1 = __builtin_shufflevector(*(uint32x2_t *) &__reint, *(uint32x2_t *) &__reint, __p3, __p3); \
-  __ret = vdot_s32(__s0, __s1, *(int8x8_t *) &__reint1); \
-  __ret; \
-})
-#else
-#define vdot_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-int8x8_t __reint = __rev2; \
-int32x2_t __reint1 = __builtin_shufflevector(*(uint32x2_t *) &__reint, *(uint32x2_t *) &__reint, __p3, __p3); \
-  __ret = __noswap_vdot_s32(__rev0, __rev1, *(int8x8_t *) &__reint1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_DOTPROD) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-#define vdotq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __s2 = __p2; \
-  uint32x4_t __ret; \
-uint8x16_t __reint = __s2; \
-uint32x4_t __reint1 = __builtin_shufflevector(*(uint32x4_t *) &__reint, *(uint32x4_t *) &__reint, __p3, __p3, __p3, __p3); \
-  __ret = vdotq_u32(__s0, __s1, *(uint8x16_t *) &__reint1); \
-  __ret; \
-})
-#else
-#define vdotq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-uint8x16_t __reint = __rev2; \
-uint32x4_t __reint1 = __builtin_shufflevector(*(uint32x4_t *) &__reint, *(uint32x4_t *) &__reint, __p3, __p3, __p3, __p3); \
-  __ret = __noswap_vdotq_u32(__rev0, __rev1, *(uint8x16_t *) &__reint1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdotq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __s2 = __p2; \
-  int32x4_t __ret; \
-int8x16_t __reint = __s2; \
-int32x4_t __reint1 = __builtin_shufflevector(*(uint32x4_t *) &__reint, *(uint32x4_t *) &__reint, __p3, __p3, __p3, __p3); \
-  __ret = vdotq_s32(__s0, __s1, *(int8x16_t *) &__reint1); \
-  __ret; \
-})
-#else
-#define vdotq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-int8x16_t __reint = __rev2; \
-int32x4_t __reint1 = __builtin_shufflevector(*(uint32x4_t *) &__reint, *(uint32x4_t *) &__reint, __p3, __p3, __p3, __p3); \
-  __ret = __noswap_vdotq_s32(__rev0, __rev1, *(int8x16_t *) &__reint1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdot_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x16_t __s2 = __p2; \
-  uint32x2_t __ret; \
-uint8x16_t __reint = __s2; \
-uint32x2_t __reint1 = __builtin_shufflevector(*(uint32x4_t *) &__reint, *(uint32x4_t *) &__reint, __p3, __p3); \
-  __ret = vdot_u32(__s0, __s1, *(uint8x8_t *) &__reint1); \
-  __ret; \
-})
-#else
-#define vdot_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x16_t __s2 = __p2; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x2_t __ret; \
-uint8x16_t __reint = __rev2; \
-uint32x2_t __reint1 = __builtin_shufflevector(*(uint32x4_t *) &__reint, *(uint32x4_t *) &__reint, __p3, __p3); \
-  __ret = __noswap_vdot_u32(__rev0, __rev1, *(uint8x8_t *) &__reint1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdot_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x16_t __s2 = __p2; \
-  int32x2_t __ret; \
-int8x16_t __reint = __s2; \
-int32x2_t __reint1 = __builtin_shufflevector(*(uint32x4_t *) &__reint, *(uint32x4_t *) &__reint, __p3, __p3); \
-  __ret = vdot_s32(__s0, __s1, *(int8x8_t *) &__reint1); \
-  __ret; \
-})
-#else
-#define vdot_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x16_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-int8x16_t __reint = __rev2; \
-int32x2_t __reint1 = __builtin_shufflevector(*(uint32x4_t *) &__reint, *(uint32x4_t *) &__reint, __p3, __p3); \
-  __ret = __noswap_vdot_s32(__rev0, __rev1, *(int8x8_t *) &__reint1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_FMA)
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __ret;
-  __ret = vfmaq_f32(__p0, __p1, (float32x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vfmaq_f32(__rev0, __rev1, (float32x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __ret;
-  __ret = vfma_f32(__p0, __p1, (float32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __noswap_vfma_f32(__rev0, __rev1, (float32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = vfmaq_f32(__p0, -__p1, __p2);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vfmaq_f32(__rev0, -__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = vfma_f32(__p0, -__p1, __p2);
-  return __ret;
-}
-#else
-__ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = __noswap_vfma_f32(__rev0, -__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_FP16FML) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_high_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vabsq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vabsq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vabs_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vabs_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqzq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqzq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceqz_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceqz_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgezq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgezq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgez_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgez_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgtzq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgtzq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgtz_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgtz_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vclezq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vclezq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclez_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclez_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcltzq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcltzq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcltz_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcltz_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcvtq_f16_u16(uint16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai float16x8_t vcvtq_f16_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcvtq_f16_s16(int16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai float16x8_t vcvtq_f16_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcvt_f16_u16(uint16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai float16x4_t vcvt_f16_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcvt_f16_s16(int16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai float16x4_t vcvt_f16_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcvtq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtq_s16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vcvtq_s16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtq_s16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcvt_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvt_s16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcvt_s16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvt_s16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcvtq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcvtq_u16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcvt_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvt_u16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcvt_u16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvt_u16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcvtaq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vcvtaq_s16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcvta_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvta_s16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcvta_s16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvta_s16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcvta_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvta_u16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcvta_u16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvta_u16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcvtmq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vcvtmq_s16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcvtm_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtm_s16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcvtm_s16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtm_s16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcvtm_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcvtm_u16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcvtnq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vcvtnq_s16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcvtn_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtn_s16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcvtn_s16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtn_s16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcvtn_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcvtn_u16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcvtpq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vcvtpq_s16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcvtp_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtp_s16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcvtp_s16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtp_s16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcvtp_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcvtp_u16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 40); \
-  __ret; \
-})
-#else
-#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 8); \
-  __ret; \
-})
-#else
-#define vext_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x8_t __noswap_vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = vfmaq_f16(__p0, -__p1, __p2);
-  return __ret;
-}
-#else
-__ai float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __noswap_vfmaq_f16(__rev0, -__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = vfma_f16(__p0, -__p1, __p2);
-  return __ret;
-}
-#else
-__ai float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __noswap_vfma_f16(__rev0, -__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmulq_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmul_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_n_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = __s0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \
-  __ret; \
-})
-#else
-#define vmulq_n_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __rev0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_n_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = __s0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \
-  __ret; \
-})
-#else
-#define vmul_n_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __rev0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vnegq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai float16x8_t vnegq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vneg_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai float16x4_t vneg_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrecpeq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrecpeq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrecpe_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrecpe_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrev64q_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai float16x8_t vrev64q_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrev64_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai float16x4_t vrev64_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrsqrteq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrsqrteq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrsqrte_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrsqrte_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#else
-__ai float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __rev0 / __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#else
-__ai float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __rev0 / __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_lane_f16((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_lane_f16((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_laneq_f16((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_laneq_f16((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (int8x8_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (int8x8_t)__rev2, __p3); \
-  __ret; \
-})
-#define __noswap_vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (int8x8_t)__s2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \
-  __ret; \
-})
-#else
-#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \
-  __ret; \
-})
-#else
-#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (int8x16_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (int8x16_t)__rev2, __p3); \
-  __ret; \
-})
-#define __noswap_vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (int8x16_t)__s2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \
-  __ret; \
-})
-#else
-#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \
-  __ret; \
-})
-#else
-#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = vfmaq_f16(__s0, __s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
-  __ret; \
-})
-#else
-#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __noswap_vfmaq_f16(__rev0, __rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = vfma_f16(__s0, __s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
-  __ret; \
-})
-#else
-#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __noswap_vfma_f16(__rev0, __rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsh_lane_f16(__p0_0, __p1_0, __p2_0, __p3_0) __extension__ ({ \
-  float16_t __s0_0 = __p0_0; \
-  float16_t __s1_0 = __p1_0; \
-  float16x4_t __s2_0 = __p2_0; \
-  float16_t __ret_0; \
-  __ret_0 = vfmah_lane_f16(__s0_0, -__s1_0, __s2_0, __p3_0); \
-  __ret_0; \
-})
-#else
-#define vfmsh_lane_f16(__p0_1, __p1_1, __p2_1, __p3_1) __extension__ ({ \
-  float16_t __s0_1 = __p0_1; \
-  float16_t __s1_1 = __p1_1; \
-  float16x4_t __s2_1 = __p2_1; \
-  float16x4_t __rev2_1;  __rev2_1 = __builtin_shufflevector(__s2_1, __s2_1, 3, 2, 1, 0); \
-  float16_t __ret_1; \
-  __ret_1 = __noswap_vfmah_lane_f16(__s0_1, -__s1_1, __rev2_1, __p3_1); \
-  __ret_1; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_lane_f16(__p0_2, __p1_2, __p2_2, __p3_2) __extension__ ({ \
-  float16x8_t __s0_2 = __p0_2; \
-  float16x8_t __s1_2 = __p1_2; \
-  float16x4_t __s2_2 = __p2_2; \
-  float16x8_t __ret_2; \
-  __ret_2 = vfmaq_lane_f16(__s0_2, -__s1_2, __s2_2, __p3_2); \
-  __ret_2; \
-})
-#else
-#define vfmsq_lane_f16(__p0_3, __p1_3, __p2_3, __p3_3) __extension__ ({ \
-  float16x8_t __s0_3 = __p0_3; \
-  float16x8_t __s1_3 = __p1_3; \
-  float16x4_t __s2_3 = __p2_3; \
-  float16x8_t __rev0_3;  __rev0_3 = __builtin_shufflevector(__s0_3, __s0_3, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_3;  __rev1_3 = __builtin_shufflevector(__s1_3, __s1_3, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_3;  __rev2_3 = __builtin_shufflevector(__s2_3, __s2_3, 3, 2, 1, 0); \
-  float16x8_t __ret_3; \
-  __ret_3 = __noswap_vfmaq_lane_f16(__rev0_3, -__rev1_3, __rev2_3, __p3_3); \
-  __ret_3 = __builtin_shufflevector(__ret_3, __ret_3, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_3; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_lane_f16(__p0_4, __p1_4, __p2_4, __p3_4) __extension__ ({ \
-  float16x4_t __s0_4 = __p0_4; \
-  float16x4_t __s1_4 = __p1_4; \
-  float16x4_t __s2_4 = __p2_4; \
-  float16x4_t __ret_4; \
-  __ret_4 = vfma_lane_f16(__s0_4, -__s1_4, __s2_4, __p3_4); \
-  __ret_4; \
-})
-#else
-#define vfms_lane_f16(__p0_5, __p1_5, __p2_5, __p3_5) __extension__ ({ \
-  float16x4_t __s0_5 = __p0_5; \
-  float16x4_t __s1_5 = __p1_5; \
-  float16x4_t __s2_5 = __p2_5; \
-  float16x4_t __rev0_5;  __rev0_5 = __builtin_shufflevector(__s0_5, __s0_5, 3, 2, 1, 0); \
-  float16x4_t __rev1_5;  __rev1_5 = __builtin_shufflevector(__s1_5, __s1_5, 3, 2, 1, 0); \
-  float16x4_t __rev2_5;  __rev2_5 = __builtin_shufflevector(__s2_5, __s2_5, 3, 2, 1, 0); \
-  float16x4_t __ret_5; \
-  __ret_5 = __noswap_vfma_lane_f16(__rev0_5, -__rev1_5, __rev2_5, __p3_5); \
-  __ret_5 = __builtin_shufflevector(__ret_5, __ret_5, 3, 2, 1, 0); \
-  __ret_5; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsh_laneq_f16(__p0_6, __p1_6, __p2_6, __p3_6) __extension__ ({ \
-  float16_t __s0_6 = __p0_6; \
-  float16_t __s1_6 = __p1_6; \
-  float16x8_t __s2_6 = __p2_6; \
-  float16_t __ret_6; \
-  __ret_6 = vfmah_laneq_f16(__s0_6, -__s1_6, __s2_6, __p3_6); \
-  __ret_6; \
-})
-#else
-#define vfmsh_laneq_f16(__p0_7, __p1_7, __p2_7, __p3_7) __extension__ ({ \
-  float16_t __s0_7 = __p0_7; \
-  float16_t __s1_7 = __p1_7; \
-  float16x8_t __s2_7 = __p2_7; \
-  float16x8_t __rev2_7;  __rev2_7 = __builtin_shufflevector(__s2_7, __s2_7, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret_7; \
-  __ret_7 = __noswap_vfmah_laneq_f16(__s0_7, -__s1_7, __rev2_7, __p3_7); \
-  __ret_7; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_laneq_f16(__p0_8, __p1_8, __p2_8, __p3_8) __extension__ ({ \
-  float16x8_t __s0_8 = __p0_8; \
-  float16x8_t __s1_8 = __p1_8; \
-  float16x8_t __s2_8 = __p2_8; \
-  float16x8_t __ret_8; \
-  __ret_8 = vfmaq_laneq_f16(__s0_8, -__s1_8, __s2_8, __p3_8); \
-  __ret_8; \
-})
-#else
-#define vfmsq_laneq_f16(__p0_9, __p1_9, __p2_9, __p3_9) __extension__ ({ \
-  float16x8_t __s0_9 = __p0_9; \
-  float16x8_t __s1_9 = __p1_9; \
-  float16x8_t __s2_9 = __p2_9; \
-  float16x8_t __rev0_9;  __rev0_9 = __builtin_shufflevector(__s0_9, __s0_9, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_9;  __rev1_9 = __builtin_shufflevector(__s1_9, __s1_9, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_9;  __rev2_9 = __builtin_shufflevector(__s2_9, __s2_9, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_9; \
-  __ret_9 = __noswap_vfmaq_laneq_f16(__rev0_9, -__rev1_9, __rev2_9, __p3_9); \
-  __ret_9 = __builtin_shufflevector(__ret_9, __ret_9, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_9; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_laneq_f16(__p0_10, __p1_10, __p2_10, __p3_10) __extension__ ({ \
-  float16x4_t __s0_10 = __p0_10; \
-  float16x4_t __s1_10 = __p1_10; \
-  float16x8_t __s2_10 = __p2_10; \
-  float16x4_t __ret_10; \
-  __ret_10 = vfma_laneq_f16(__s0_10, -__s1_10, __s2_10, __p3_10); \
-  __ret_10; \
-})
-#else
-#define vfms_laneq_f16(__p0_11, __p1_11, __p2_11, __p3_11) __extension__ ({ \
-  float16x4_t __s0_11 = __p0_11; \
-  float16x4_t __s1_11 = __p1_11; \
-  float16x8_t __s2_11 = __p2_11; \
-  float16x4_t __rev0_11;  __rev0_11 = __builtin_shufflevector(__s0_11, __s0_11, 3, 2, 1, 0); \
-  float16x4_t __rev1_11;  __rev1_11 = __builtin_shufflevector(__s1_11, __s1_11, 3, 2, 1, 0); \
-  float16x8_t __rev2_11;  __rev2_11 = __builtin_shufflevector(__s2_11, __s2_11, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_11; \
-  __ret_11 = __noswap_vfma_laneq_f16(__rev0_11, -__rev1_11, __rev2_11, __p3_11); \
-  __ret_11 = __builtin_shufflevector(__ret_11, __ret_11, 3, 2, 1, 0); \
-  __ret_11; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = vfmaq_f16(__s0, -__s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
-  __ret; \
-})
-#else
-#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __noswap_vfmaq_f16(__rev0, -__rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = vfma_f16(__s0, -__s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
-  __ret; \
-})
-#else
-#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __noswap_vfma_f16(__rev0, -__rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmaxnmvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__s0); \
-  __ret; \
-})
-#else
-#define vmaxnmvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmaxnmv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__s0); \
-  __ret; \
-})
-#else
-#define vmaxnmv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmaxvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__s0); \
-  __ret; \
-})
-#else
-#define vmaxvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmaxv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__s0); \
-  __ret; \
-})
-#else
-#define vmaxv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vminnmvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__s0); \
-  __ret; \
-})
-#else
-#define vminnmvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vminnmv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__s0); \
-  __ret; \
-})
-#else
-#define vminnmv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vminvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__s0); \
-  __ret; \
-})
-#else
-#define vminvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vminv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__s0); \
-  __ret; \
-})
-#else
-#define vminv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmulq_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmul_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x8_t __noswap_vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (int8x8_t)__rev1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = vmulxq_f16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmulxq_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __noswap_vmulxq_f16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = vmulx_f16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmulx_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __noswap_vmulx_f16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (int8x16_t)__rev1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = vmulxq_f16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmulxq_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __noswap_vmulxq_f16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = vmulx_f16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmulx_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __noswap_vmulx_f16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = vmulxq_f16(__s0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \
-  __ret; \
-})
-#else
-#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __noswap_vmulxq_f16(__rev0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_n_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = vmulx_f16(__s0, (float16x4_t) {__s1, __s1, __s1, __s1}); \
-  __ret; \
-})
-#else
-#define vmulx_n_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __noswap_vmulx_f16(__rev0, (float16x4_t) {__s1, __s1, __s1, __s1}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndiq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndiq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrndi_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrndi_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vsqrtq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vsqrtq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vsqrt_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vsqrt_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_QRDMX)
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = vqaddq_s32(__p0, vqrdmulhq_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = vqaddq_s16(__p0, vqrdmulhq_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __ret;
-  __ret = vqadd_s32(__p0, vqrdmulh_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
-  __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __ret;
-  __ret = vqadd_s16(__p0, vqrdmulh_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqaddq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
-  __ret; \
-})
-#else
-#define vqrdmlahq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x8_t __ret; \
-  __ret = vqaddq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
-  __ret; \
-})
-#else
-#define vqrdmlahq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __ret; \
-  __ret = vqadd_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \
-  __ret; \
-})
-#else
-#define vqrdmlah_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __ret; \
-  __ret = vqadd_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
-  __ret; \
-})
-#else
-#define vqrdmlah_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = vqsubq_s32(__p0, vqrdmulhq_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = vqsubq_s16(__p0, vqrdmulhq_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __ret;
-  __ret = vqsub_s32(__p0, vqrdmulh_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
-  __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __ret;
-  __ret = vqsub_s16(__p0, vqrdmulh_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqsubq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
-  __ret; \
-})
-#else
-#define vqrdmlshq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x8_t __ret; \
-  __ret = vqsubq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
-  __ret; \
-})
-#else
-#define vqrdmlshq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __ret; \
-  __ret = vqsub_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \
-  __ret; \
-})
-#else
-#define vqrdmlsh_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __ret; \
-  __ret = vqsub_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
-  __ret; \
-})
-#else
-#define vqrdmlsh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqaddq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
-  __ret; \
-})
-#else
-#define vqrdmlahq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __ret; \
-  __ret = vqaddq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
-  __ret; \
-})
-#else
-#define vqrdmlahq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x2_t __ret; \
-  __ret = vqadd_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \
-  __ret; \
-})
-#else
-#define vqrdmlah_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x4_t __ret; \
-  __ret = vqadd_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
-  __ret; \
-})
-#else
-#define vqrdmlah_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqsubq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
-  __ret; \
-})
-#else
-#define vqrdmlshq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __ret; \
-  __ret = vqsubq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
-  __ret; \
-})
-#else
-#define vqrdmlshq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x2_t __ret; \
-  __ret = vqsub_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \
-  __ret; \
-})
-#else
-#define vqrdmlsh_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x4_t __ret; \
-  __ret = vqsub_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
-  __ret; \
-})
-#else
-#define vqrdmlsh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#endif
-#if defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vabd_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-__ai float64_t vabdd_f64(float64_t __p0, float64_t __p1) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vabdd_f64(__p0, __p1);
-  return __ret;
-}
-__ai float32_t vabds_f32(float32_t __p0, float32_t __p1) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vabds_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vabsq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vabsq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vabsq_s64(int64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vabsq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vabs_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-__ai int64x1_t vabs_s64(int64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-__ai int64_t vabsd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vabsd_s64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vadd_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-__ai uint64_t vaddd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddd_u64(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vaddd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddd_s64(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vaddhn_u32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vaddhn_u32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vaddhn_u64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vaddhn_u64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vaddhn_u16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vaddhn_u16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vaddhn_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vaddhn_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vaddhn_s64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vaddhn_s64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vaddhn_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vaddhn_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vaddlvq_u8(uint8x16_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlvq_u8((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vaddlvq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlvq_u8((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64_t vaddlvq_u32(uint32x4_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlvq_u32((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai uint64_t vaddlvq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlvq_u32((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vaddlvq_u16(uint16x8_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlvq_u16((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vaddlvq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlvq_u16((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vaddlvq_s8(int8x16_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlvq_s8((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai int16_t vaddlvq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlvq_s8((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64_t vaddlvq_s32(int32x4_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlvq_s32((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai int64_t vaddlvq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlvq_s32((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vaddlvq_s16(int16x8_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlvq_s16((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai int32_t vaddlvq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlvq_s16((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vaddlv_u8(uint8x8_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlv_u8((int8x8_t)__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vaddlv_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlv_u8((int8x8_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64_t vaddlv_u32(uint32x2_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlv_u32((int8x8_t)__p0);
-  return __ret;
-}
-#else
-__ai uint64_t vaddlv_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlv_u32((int8x8_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vaddlv_u16(uint16x4_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlv_u16((int8x8_t)__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vaddlv_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlv_u16((int8x8_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vaddlv_s8(int8x8_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlv_s8((int8x8_t)__p0);
-  return __ret;
-}
-#else
-__ai int16_t vaddlv_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlv_s8((int8x8_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64_t vaddlv_s32(int32x2_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlv_s32((int8x8_t)__p0);
-  return __ret;
-}
-#else
-__ai int64_t vaddlv_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlv_s32((int8x8_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vaddlv_s16(int16x4_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlv_s16((int8x8_t)__p0);
-  return __ret;
-}
-#else
-__ai int32_t vaddlv_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlv_s16((int8x8_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vaddvq_u8(uint8x16_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddvq_u8((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vaddvq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddvq_u8((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vaddvq_u32(uint32x4_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddvq_u32((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vaddvq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddvq_u32((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64_t vaddvq_u64(uint64x2_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddvq_u64((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai uint64_t vaddvq_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddvq_u64((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vaddvq_u16(uint16x8_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddvq_u16((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vaddvq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddvq_u16((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vaddvq_s8(int8x16_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddvq_s8((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai int8_t vaddvq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddvq_s8((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vaddvq_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vaddvq_f64((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai float64_t vaddvq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vaddvq_f64((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vaddvq_f32(float32x4_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddvq_f32((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai float32_t vaddvq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddvq_f32((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vaddvq_s32(int32x4_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddvq_s32((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai int32_t vaddvq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddvq_s32((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64_t vaddvq_s64(int64x2_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddvq_s64((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai int64_t vaddvq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddvq_s64((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vaddvq_s16(int16x8_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddvq_s16((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai int16_t vaddvq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddvq_s16((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vaddv_u8(uint8x8_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddv_u8((int8x8_t)__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vaddv_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddv_u8((int8x8_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vaddv_u32(uint32x2_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddv_u32((int8x8_t)__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vaddv_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddv_u32((int8x8_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vaddv_u16(uint16x4_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddv_u16((int8x8_t)__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vaddv_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddv_u16((int8x8_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vaddv_s8(int8x8_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddv_s8((int8x8_t)__p0);
-  return __ret;
-}
-#else
-__ai int8_t vaddv_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddv_s8((int8x8_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vaddv_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddv_f32((int8x8_t)__p0);
-  return __ret;
-}
-#else
-__ai float32_t vaddv_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddv_f32((int8x8_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vaddv_s32(int32x2_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddv_s32((int8x8_t)__p0);
-  return __ret;
-}
-#else
-__ai int32_t vaddv_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddv_s32((int8x8_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vaddv_s16(int16x4_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddv_s16((int8x8_t)__p0);
-  return __ret;
-}
-#else
-__ai int16_t vaddv_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddv_s16((int8x8_t)__rev0);
-  return __ret;
-}
-#endif
-
-__ai poly64x1_t vbsl_p64(uint64x1_t __p0, poly64x1_t __p1, poly64x1_t __p2) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 6);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 38);
-  return __ret;
-}
-#else
-__ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 38);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vbsl_f64(uint64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcage_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64_t vcaged_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcaged_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcages_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcages_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcagt_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64_t vcagtd_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcagtd_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcagts_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcagts_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcale_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64_t vcaled_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcaled_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcales_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcales_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcalt_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64_t vcaltd_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcaltd_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcalts_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcalts_f32(__p0, __p1);
-  return __ret;
-}
-__ai uint64x1_t vceq_p64(poly64x1_t __p0, poly64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 == __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vceq_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 == __p1);
-  return __ret;
-}
-__ai uint64x1_t vceq_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 == __p1);
-  return __ret;
-}
-__ai uint64x1_t vceq_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 == __p1);
-  return __ret;
-}
-__ai uint64_t vceqd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqd_u64(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vceqd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vceqd_s64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vceqd_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqd_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vceqs_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vceqs_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceqz_p8(poly8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceqz_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vceqz_p64(poly64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceqz_p16(poly16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceqz_p16(poly16x4_t __p0) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqzq_p8(poly8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqzq_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqzq_p64(poly64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqzq_p64(poly64x2_t __p0) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqzq_p16(poly16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqzq_p16(poly16x8_t __p0) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqzq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqzq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqzq_u32(uint32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqzq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqzq_u64(uint64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqzq_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqzq_u16(uint16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqzq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqzq_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqzq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqzq_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqzq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqzq_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqzq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqzq_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqzq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqzq_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqzq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqzq_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqzq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceqz_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceqz_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceqz_u32(uint32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceqz_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vceqz_u64(uint64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceqz_u16(uint16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceqz_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceqz_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceqz_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vceqz_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceqz_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceqz_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceqz_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceqz_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vceqz_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceqz_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceqz_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64_t vceqzd_u64(uint64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqzd_u64(__p0);
-  return __ret;
-}
-__ai int64_t vceqzd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vceqzd_s64(__p0);
-  return __ret;
-}
-__ai uint64_t vceqzd_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqzd_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vceqzs_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vceqzs_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcge_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 >= __p1);
-  return __ret;
-}
-__ai uint64x1_t vcge_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 >= __p1);
-  return __ret;
-}
-__ai uint64x1_t vcge_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 >= __p1);
-  return __ret;
-}
-__ai int64_t vcged_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcged_s64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcged_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcged_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcged_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcged_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcges_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcges_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcgezq_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcgezq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgezq_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgezq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgezq_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgezq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgezq_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgezq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgezq_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgezq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgezq_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgezq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcgez_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcgez_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcgez_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgez_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgez_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgez_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgez_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcgez_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgez_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgez_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64_t vcgezd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcgezd_s64(__p0);
-  return __ret;
-}
-__ai uint64_t vcgezd_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgezd_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcgezs_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcgezs_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcgt_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 > __p1);
-  return __ret;
-}
-__ai uint64x1_t vcgt_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 > __p1);
-  return __ret;
-}
-__ai uint64x1_t vcgt_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 > __p1);
-  return __ret;
-}
-__ai int64_t vcgtd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcgtd_s64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcgtd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgtd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcgtd_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgtd_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcgts_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcgts_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcgtzq_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcgtzq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgtzq_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgtzq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgtzq_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgtzq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgtzq_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgtzq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgtzq_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgtzq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgtzq_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgtzq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcgtz_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcgtz_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcgtz_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgtz_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgtz_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgtz_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgtz_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcgtz_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgtz_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgtz_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64_t vcgtzd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcgtzd_s64(__p0);
-  return __ret;
-}
-__ai uint64_t vcgtzd_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgtzd_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcgtzs_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcgtzs_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcle_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 <= __p1);
-  return __ret;
-}
-__ai uint64x1_t vcle_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 <= __p1);
-  return __ret;
-}
-__ai uint64x1_t vcle_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 <= __p1);
-  return __ret;
-}
-__ai uint64_t vcled_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcled_u64(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vcled_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcled_s64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcled_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcled_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcles_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcles_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vclezq_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vclezq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vclezq_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vclezq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vclezq_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vclezq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vclezq_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vclezq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vclezq_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vclezq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vclezq_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vclezq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vclez_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vclez_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vclez_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vclez_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vclez_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vclez_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vclez_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vclez_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclez_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclez_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64_t vclezd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vclezd_s64(__p0);
-  return __ret;
-}
-__ai uint64_t vclezd_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vclezd_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vclezs_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vclezs_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vclt_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 < __p1);
-  return __ret;
-}
-__ai uint64x1_t vclt_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 < __p1);
-  return __ret;
-}
-__ai uint64x1_t vclt_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 < __p1);
-  return __ret;
-}
-__ai uint64_t vcltd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcltd_u64(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vcltd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcltd_s64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcltd_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcltd_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vclts_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vclts_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcltzq_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcltzq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcltzq_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcltzq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcltzq_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcltzq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcltzq_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcltzq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcltzq_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcltzq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcltzq_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcltzq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcltz_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcltz_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcltz_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcltz_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcltz_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcltz_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcltz_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcltz_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcltz_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcltz_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64_t vcltzd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcltzd_s64(__p0);
-  return __ret;
-}
-__ai uint64_t vcltzd_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcltzd_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcltzs_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcltzs_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  return __ret;
-}
-#else
-__ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  return __ret;
-}
-#else
-__ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_p8(__p0_12, __p1_12, __p2_12, __p3_12) __extension__ ({ \
-  poly8x16_t __s0_12 = __p0_12; \
-  poly8x8_t __s2_12 = __p2_12; \
-  poly8x16_t __ret_12; \
-  __ret_12 = vsetq_lane_p8(vget_lane_p8(__s2_12, __p3_12), __s0_12, __p1_12); \
-  __ret_12; \
-})
-#else
-#define vcopyq_lane_p8(__p0_13, __p1_13, __p2_13, __p3_13) __extension__ ({ \
-  poly8x16_t __s0_13 = __p0_13; \
-  poly8x8_t __s2_13 = __p2_13; \
-  poly8x16_t __rev0_13;  __rev0_13 = __builtin_shufflevector(__s0_13, __s0_13, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev2_13;  __rev2_13 = __builtin_shufflevector(__s2_13, __s2_13, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret_13; \
-  __ret_13 = __noswap_vsetq_lane_p8(__noswap_vget_lane_p8(__rev2_13, __p3_13), __rev0_13, __p1_13); \
-  __ret_13 = __builtin_shufflevector(__ret_13, __ret_13, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_13; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_p16(__p0_14, __p1_14, __p2_14, __p3_14) __extension__ ({ \
-  poly16x8_t __s0_14 = __p0_14; \
-  poly16x4_t __s2_14 = __p2_14; \
-  poly16x8_t __ret_14; \
-  __ret_14 = vsetq_lane_p16(vget_lane_p16(__s2_14, __p3_14), __s0_14, __p1_14); \
-  __ret_14; \
-})
-#else
-#define vcopyq_lane_p16(__p0_15, __p1_15, __p2_15, __p3_15) __extension__ ({ \
-  poly16x8_t __s0_15 = __p0_15; \
-  poly16x4_t __s2_15 = __p2_15; \
-  poly16x8_t __rev0_15;  __rev0_15 = __builtin_shufflevector(__s0_15, __s0_15, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __rev2_15;  __rev2_15 = __builtin_shufflevector(__s2_15, __s2_15, 3, 2, 1, 0); \
-  poly16x8_t __ret_15; \
-  __ret_15 = __noswap_vsetq_lane_p16(__noswap_vget_lane_p16(__rev2_15, __p3_15), __rev0_15, __p1_15); \
-  __ret_15 = __builtin_shufflevector(__ret_15, __ret_15, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_15; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u8(__p0_16, __p1_16, __p2_16, __p3_16) __extension__ ({ \
-  uint8x16_t __s0_16 = __p0_16; \
-  uint8x8_t __s2_16 = __p2_16; \
-  uint8x16_t __ret_16; \
-  __ret_16 = vsetq_lane_u8(vget_lane_u8(__s2_16, __p3_16), __s0_16, __p1_16); \
-  __ret_16; \
-})
-#else
-#define vcopyq_lane_u8(__p0_17, __p1_17, __p2_17, __p3_17) __extension__ ({ \
-  uint8x16_t __s0_17 = __p0_17; \
-  uint8x8_t __s2_17 = __p2_17; \
-  uint8x16_t __rev0_17;  __rev0_17 = __builtin_shufflevector(__s0_17, __s0_17, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_17;  __rev2_17 = __builtin_shufflevector(__s2_17, __s2_17, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_17; \
-  __ret_17 = __noswap_vsetq_lane_u8(__noswap_vget_lane_u8(__rev2_17, __p3_17), __rev0_17, __p1_17); \
-  __ret_17 = __builtin_shufflevector(__ret_17, __ret_17, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_17; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u32(__p0_18, __p1_18, __p2_18, __p3_18) __extension__ ({ \
-  uint32x4_t __s0_18 = __p0_18; \
-  uint32x2_t __s2_18 = __p2_18; \
-  uint32x4_t __ret_18; \
-  __ret_18 = vsetq_lane_u32(vget_lane_u32(__s2_18, __p3_18), __s0_18, __p1_18); \
-  __ret_18; \
-})
-#else
-#define vcopyq_lane_u32(__p0_19, __p1_19, __p2_19, __p3_19) __extension__ ({ \
-  uint32x4_t __s0_19 = __p0_19; \
-  uint32x2_t __s2_19 = __p2_19; \
-  uint32x4_t __rev0_19;  __rev0_19 = __builtin_shufflevector(__s0_19, __s0_19, 3, 2, 1, 0); \
-  uint32x2_t __rev2_19;  __rev2_19 = __builtin_shufflevector(__s2_19, __s2_19, 1, 0); \
-  uint32x4_t __ret_19; \
-  __ret_19 = __noswap_vsetq_lane_u32(__noswap_vget_lane_u32(__rev2_19, __p3_19), __rev0_19, __p1_19); \
-  __ret_19 = __builtin_shufflevector(__ret_19, __ret_19, 3, 2, 1, 0); \
-  __ret_19; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u64(__p0_20, __p1_20, __p2_20, __p3_20) __extension__ ({ \
-  uint64x2_t __s0_20 = __p0_20; \
-  uint64x1_t __s2_20 = __p2_20; \
-  uint64x2_t __ret_20; \
-  __ret_20 = vsetq_lane_u64(vget_lane_u64(__s2_20, __p3_20), __s0_20, __p1_20); \
-  __ret_20; \
-})
-#else
-#define vcopyq_lane_u64(__p0_21, __p1_21, __p2_21, __p3_21) __extension__ ({ \
-  uint64x2_t __s0_21 = __p0_21; \
-  uint64x1_t __s2_21 = __p2_21; \
-  uint64x2_t __rev0_21;  __rev0_21 = __builtin_shufflevector(__s0_21, __s0_21, 1, 0); \
-  uint64x2_t __ret_21; \
-  __ret_21 = __noswap_vsetq_lane_u64(vget_lane_u64(__s2_21, __p3_21), __rev0_21, __p1_21); \
-  __ret_21 = __builtin_shufflevector(__ret_21, __ret_21, 1, 0); \
-  __ret_21; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u16(__p0_22, __p1_22, __p2_22, __p3_22) __extension__ ({ \
-  uint16x8_t __s0_22 = __p0_22; \
-  uint16x4_t __s2_22 = __p2_22; \
-  uint16x8_t __ret_22; \
-  __ret_22 = vsetq_lane_u16(vget_lane_u16(__s2_22, __p3_22), __s0_22, __p1_22); \
-  __ret_22; \
-})
-#else
-#define vcopyq_lane_u16(__p0_23, __p1_23, __p2_23, __p3_23) __extension__ ({ \
-  uint16x8_t __s0_23 = __p0_23; \
-  uint16x4_t __s2_23 = __p2_23; \
-  uint16x8_t __rev0_23;  __rev0_23 = __builtin_shufflevector(__s0_23, __s0_23, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2_23;  __rev2_23 = __builtin_shufflevector(__s2_23, __s2_23, 3, 2, 1, 0); \
-  uint16x8_t __ret_23; \
-  __ret_23 = __noswap_vsetq_lane_u16(__noswap_vget_lane_u16(__rev2_23, __p3_23), __rev0_23, __p1_23); \
-  __ret_23 = __builtin_shufflevector(__ret_23, __ret_23, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_23; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s8(__p0_24, __p1_24, __p2_24, __p3_24) __extension__ ({ \
-  int8x16_t __s0_24 = __p0_24; \
-  int8x8_t __s2_24 = __p2_24; \
-  int8x16_t __ret_24; \
-  __ret_24 = vsetq_lane_s8(vget_lane_s8(__s2_24, __p3_24), __s0_24, __p1_24); \
-  __ret_24; \
-})
-#else
-#define vcopyq_lane_s8(__p0_25, __p1_25, __p2_25, __p3_25) __extension__ ({ \
-  int8x16_t __s0_25 = __p0_25; \
-  int8x8_t __s2_25 = __p2_25; \
-  int8x16_t __rev0_25;  __rev0_25 = __builtin_shufflevector(__s0_25, __s0_25, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_25;  __rev2_25 = __builtin_shufflevector(__s2_25, __s2_25, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_25; \
-  __ret_25 = __noswap_vsetq_lane_s8(__noswap_vget_lane_s8(__rev2_25, __p3_25), __rev0_25, __p1_25); \
-  __ret_25 = __builtin_shufflevector(__ret_25, __ret_25, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_25; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_f32(__p0_26, __p1_26, __p2_26, __p3_26) __extension__ ({ \
-  float32x4_t __s0_26 = __p0_26; \
-  float32x2_t __s2_26 = __p2_26; \
-  float32x4_t __ret_26; \
-  __ret_26 = vsetq_lane_f32(vget_lane_f32(__s2_26, __p3_26), __s0_26, __p1_26); \
-  __ret_26; \
-})
-#else
-#define vcopyq_lane_f32(__p0_27, __p1_27, __p2_27, __p3_27) __extension__ ({ \
-  float32x4_t __s0_27 = __p0_27; \
-  float32x2_t __s2_27 = __p2_27; \
-  float32x4_t __rev0_27;  __rev0_27 = __builtin_shufflevector(__s0_27, __s0_27, 3, 2, 1, 0); \
-  float32x2_t __rev2_27;  __rev2_27 = __builtin_shufflevector(__s2_27, __s2_27, 1, 0); \
-  float32x4_t __ret_27; \
-  __ret_27 = __noswap_vsetq_lane_f32(__noswap_vget_lane_f32(__rev2_27, __p3_27), __rev0_27, __p1_27); \
-  __ret_27 = __builtin_shufflevector(__ret_27, __ret_27, 3, 2, 1, 0); \
-  __ret_27; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s32(__p0_28, __p1_28, __p2_28, __p3_28) __extension__ ({ \
-  int32x4_t __s0_28 = __p0_28; \
-  int32x2_t __s2_28 = __p2_28; \
-  int32x4_t __ret_28; \
-  __ret_28 = vsetq_lane_s32(vget_lane_s32(__s2_28, __p3_28), __s0_28, __p1_28); \
-  __ret_28; \
-})
-#else
-#define vcopyq_lane_s32(__p0_29, __p1_29, __p2_29, __p3_29) __extension__ ({ \
-  int32x4_t __s0_29 = __p0_29; \
-  int32x2_t __s2_29 = __p2_29; \
-  int32x4_t __rev0_29;  __rev0_29 = __builtin_shufflevector(__s0_29, __s0_29, 3, 2, 1, 0); \
-  int32x2_t __rev2_29;  __rev2_29 = __builtin_shufflevector(__s2_29, __s2_29, 1, 0); \
-  int32x4_t __ret_29; \
-  __ret_29 = __noswap_vsetq_lane_s32(__noswap_vget_lane_s32(__rev2_29, __p3_29), __rev0_29, __p1_29); \
-  __ret_29 = __builtin_shufflevector(__ret_29, __ret_29, 3, 2, 1, 0); \
-  __ret_29; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s64(__p0_30, __p1_30, __p2_30, __p3_30) __extension__ ({ \
-  int64x2_t __s0_30 = __p0_30; \
-  int64x1_t __s2_30 = __p2_30; \
-  int64x2_t __ret_30; \
-  __ret_30 = vsetq_lane_s64(vget_lane_s64(__s2_30, __p3_30), __s0_30, __p1_30); \
-  __ret_30; \
-})
-#else
-#define vcopyq_lane_s64(__p0_31, __p1_31, __p2_31, __p3_31) __extension__ ({ \
-  int64x2_t __s0_31 = __p0_31; \
-  int64x1_t __s2_31 = __p2_31; \
-  int64x2_t __rev0_31;  __rev0_31 = __builtin_shufflevector(__s0_31, __s0_31, 1, 0); \
-  int64x2_t __ret_31; \
-  __ret_31 = __noswap_vsetq_lane_s64(vget_lane_s64(__s2_31, __p3_31), __rev0_31, __p1_31); \
-  __ret_31 = __builtin_shufflevector(__ret_31, __ret_31, 1, 0); \
-  __ret_31; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s16(__p0_32, __p1_32, __p2_32, __p3_32) __extension__ ({ \
-  int16x8_t __s0_32 = __p0_32; \
-  int16x4_t __s2_32 = __p2_32; \
-  int16x8_t __ret_32; \
-  __ret_32 = vsetq_lane_s16(vget_lane_s16(__s2_32, __p3_32), __s0_32, __p1_32); \
-  __ret_32; \
-})
-#else
-#define vcopyq_lane_s16(__p0_33, __p1_33, __p2_33, __p3_33) __extension__ ({ \
-  int16x8_t __s0_33 = __p0_33; \
-  int16x4_t __s2_33 = __p2_33; \
-  int16x8_t __rev0_33;  __rev0_33 = __builtin_shufflevector(__s0_33, __s0_33, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_33;  __rev2_33 = __builtin_shufflevector(__s2_33, __s2_33, 3, 2, 1, 0); \
-  int16x8_t __ret_33; \
-  __ret_33 = __noswap_vsetq_lane_s16(__noswap_vget_lane_s16(__rev2_33, __p3_33), __rev0_33, __p1_33); \
-  __ret_33 = __builtin_shufflevector(__ret_33, __ret_33, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_33; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_p8(__p0_34, __p1_34, __p2_34, __p3_34) __extension__ ({ \
-  poly8x8_t __s0_34 = __p0_34; \
-  poly8x8_t __s2_34 = __p2_34; \
-  poly8x8_t __ret_34; \
-  __ret_34 = vset_lane_p8(vget_lane_p8(__s2_34, __p3_34), __s0_34, __p1_34); \
-  __ret_34; \
-})
-#else
-#define vcopy_lane_p8(__p0_35, __p1_35, __p2_35, __p3_35) __extension__ ({ \
-  poly8x8_t __s0_35 = __p0_35; \
-  poly8x8_t __s2_35 = __p2_35; \
-  poly8x8_t __rev0_35;  __rev0_35 = __builtin_shufflevector(__s0_35, __s0_35, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev2_35;  __rev2_35 = __builtin_shufflevector(__s2_35, __s2_35, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret_35; \
-  __ret_35 = __noswap_vset_lane_p8(__noswap_vget_lane_p8(__rev2_35, __p3_35), __rev0_35, __p1_35); \
-  __ret_35 = __builtin_shufflevector(__ret_35, __ret_35, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_35; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_p16(__p0_36, __p1_36, __p2_36, __p3_36) __extension__ ({ \
-  poly16x4_t __s0_36 = __p0_36; \
-  poly16x4_t __s2_36 = __p2_36; \
-  poly16x4_t __ret_36; \
-  __ret_36 = vset_lane_p16(vget_lane_p16(__s2_36, __p3_36), __s0_36, __p1_36); \
-  __ret_36; \
-})
-#else
-#define vcopy_lane_p16(__p0_37, __p1_37, __p2_37, __p3_37) __extension__ ({ \
-  poly16x4_t __s0_37 = __p0_37; \
-  poly16x4_t __s2_37 = __p2_37; \
-  poly16x4_t __rev0_37;  __rev0_37 = __builtin_shufflevector(__s0_37, __s0_37, 3, 2, 1, 0); \
-  poly16x4_t __rev2_37;  __rev2_37 = __builtin_shufflevector(__s2_37, __s2_37, 3, 2, 1, 0); \
-  poly16x4_t __ret_37; \
-  __ret_37 = __noswap_vset_lane_p16(__noswap_vget_lane_p16(__rev2_37, __p3_37), __rev0_37, __p1_37); \
-  __ret_37 = __builtin_shufflevector(__ret_37, __ret_37, 3, 2, 1, 0); \
-  __ret_37; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_u8(__p0_38, __p1_38, __p2_38, __p3_38) __extension__ ({ \
-  uint8x8_t __s0_38 = __p0_38; \
-  uint8x8_t __s2_38 = __p2_38; \
-  uint8x8_t __ret_38; \
-  __ret_38 = vset_lane_u8(vget_lane_u8(__s2_38, __p3_38), __s0_38, __p1_38); \
-  __ret_38; \
-})
-#else
-#define vcopy_lane_u8(__p0_39, __p1_39, __p2_39, __p3_39) __extension__ ({ \
-  uint8x8_t __s0_39 = __p0_39; \
-  uint8x8_t __s2_39 = __p2_39; \
-  uint8x8_t __rev0_39;  __rev0_39 = __builtin_shufflevector(__s0_39, __s0_39, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_39;  __rev2_39 = __builtin_shufflevector(__s2_39, __s2_39, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret_39; \
-  __ret_39 = __noswap_vset_lane_u8(__noswap_vget_lane_u8(__rev2_39, __p3_39), __rev0_39, __p1_39); \
-  __ret_39 = __builtin_shufflevector(__ret_39, __ret_39, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_39; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_u32(__p0_40, __p1_40, __p2_40, __p3_40) __extension__ ({ \
-  uint32x2_t __s0_40 = __p0_40; \
-  uint32x2_t __s2_40 = __p2_40; \
-  uint32x2_t __ret_40; \
-  __ret_40 = vset_lane_u32(vget_lane_u32(__s2_40, __p3_40), __s0_40, __p1_40); \
-  __ret_40; \
-})
-#else
-#define vcopy_lane_u32(__p0_41, __p1_41, __p2_41, __p3_41) __extension__ ({ \
-  uint32x2_t __s0_41 = __p0_41; \
-  uint32x2_t __s2_41 = __p2_41; \
-  uint32x2_t __rev0_41;  __rev0_41 = __builtin_shufflevector(__s0_41, __s0_41, 1, 0); \
-  uint32x2_t __rev2_41;  __rev2_41 = __builtin_shufflevector(__s2_41, __s2_41, 1, 0); \
-  uint32x2_t __ret_41; \
-  __ret_41 = __noswap_vset_lane_u32(__noswap_vget_lane_u32(__rev2_41, __p3_41), __rev0_41, __p1_41); \
-  __ret_41 = __builtin_shufflevector(__ret_41, __ret_41, 1, 0); \
-  __ret_41; \
-})
-#endif
-
-#define vcopy_lane_u64(__p0_42, __p1_42, __p2_42, __p3_42) __extension__ ({ \
-  uint64x1_t __s0_42 = __p0_42; \
-  uint64x1_t __s2_42 = __p2_42; \
-  uint64x1_t __ret_42; \
-  __ret_42 = vset_lane_u64(vget_lane_u64(__s2_42, __p3_42), __s0_42, __p1_42); \
-  __ret_42; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_u16(__p0_43, __p1_43, __p2_43, __p3_43) __extension__ ({ \
-  uint16x4_t __s0_43 = __p0_43; \
-  uint16x4_t __s2_43 = __p2_43; \
-  uint16x4_t __ret_43; \
-  __ret_43 = vset_lane_u16(vget_lane_u16(__s2_43, __p3_43), __s0_43, __p1_43); \
-  __ret_43; \
-})
-#else
-#define vcopy_lane_u16(__p0_44, __p1_44, __p2_44, __p3_44) __extension__ ({ \
-  uint16x4_t __s0_44 = __p0_44; \
-  uint16x4_t __s2_44 = __p2_44; \
-  uint16x4_t __rev0_44;  __rev0_44 = __builtin_shufflevector(__s0_44, __s0_44, 3, 2, 1, 0); \
-  uint16x4_t __rev2_44;  __rev2_44 = __builtin_shufflevector(__s2_44, __s2_44, 3, 2, 1, 0); \
-  uint16x4_t __ret_44; \
-  __ret_44 = __noswap_vset_lane_u16(__noswap_vget_lane_u16(__rev2_44, __p3_44), __rev0_44, __p1_44); \
-  __ret_44 = __builtin_shufflevector(__ret_44, __ret_44, 3, 2, 1, 0); \
-  __ret_44; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_s8(__p0_45, __p1_45, __p2_45, __p3_45) __extension__ ({ \
-  int8x8_t __s0_45 = __p0_45; \
-  int8x8_t __s2_45 = __p2_45; \
-  int8x8_t __ret_45; \
-  __ret_45 = vset_lane_s8(vget_lane_s8(__s2_45, __p3_45), __s0_45, __p1_45); \
-  __ret_45; \
-})
-#else
-#define vcopy_lane_s8(__p0_46, __p1_46, __p2_46, __p3_46) __extension__ ({ \
-  int8x8_t __s0_46 = __p0_46; \
-  int8x8_t __s2_46 = __p2_46; \
-  int8x8_t __rev0_46;  __rev0_46 = __builtin_shufflevector(__s0_46, __s0_46, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_46;  __rev2_46 = __builtin_shufflevector(__s2_46, __s2_46, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret_46; \
-  __ret_46 = __noswap_vset_lane_s8(__noswap_vget_lane_s8(__rev2_46, __p3_46), __rev0_46, __p1_46); \
-  __ret_46 = __builtin_shufflevector(__ret_46, __ret_46, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_46; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_f32(__p0_47, __p1_47, __p2_47, __p3_47) __extension__ ({ \
-  float32x2_t __s0_47 = __p0_47; \
-  float32x2_t __s2_47 = __p2_47; \
-  float32x2_t __ret_47; \
-  __ret_47 = vset_lane_f32(vget_lane_f32(__s2_47, __p3_47), __s0_47, __p1_47); \
-  __ret_47; \
-})
-#else
-#define vcopy_lane_f32(__p0_48, __p1_48, __p2_48, __p3_48) __extension__ ({ \
-  float32x2_t __s0_48 = __p0_48; \
-  float32x2_t __s2_48 = __p2_48; \
-  float32x2_t __rev0_48;  __rev0_48 = __builtin_shufflevector(__s0_48, __s0_48, 1, 0); \
-  float32x2_t __rev2_48;  __rev2_48 = __builtin_shufflevector(__s2_48, __s2_48, 1, 0); \
-  float32x2_t __ret_48; \
-  __ret_48 = __noswap_vset_lane_f32(__noswap_vget_lane_f32(__rev2_48, __p3_48), __rev0_48, __p1_48); \
-  __ret_48 = __builtin_shufflevector(__ret_48, __ret_48, 1, 0); \
-  __ret_48; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_s32(__p0_49, __p1_49, __p2_49, __p3_49) __extension__ ({ \
-  int32x2_t __s0_49 = __p0_49; \
-  int32x2_t __s2_49 = __p2_49; \
-  int32x2_t __ret_49; \
-  __ret_49 = vset_lane_s32(vget_lane_s32(__s2_49, __p3_49), __s0_49, __p1_49); \
-  __ret_49; \
-})
-#else
-#define vcopy_lane_s32(__p0_50, __p1_50, __p2_50, __p3_50) __extension__ ({ \
-  int32x2_t __s0_50 = __p0_50; \
-  int32x2_t __s2_50 = __p2_50; \
-  int32x2_t __rev0_50;  __rev0_50 = __builtin_shufflevector(__s0_50, __s0_50, 1, 0); \
-  int32x2_t __rev2_50;  __rev2_50 = __builtin_shufflevector(__s2_50, __s2_50, 1, 0); \
-  int32x2_t __ret_50; \
-  __ret_50 = __noswap_vset_lane_s32(__noswap_vget_lane_s32(__rev2_50, __p3_50), __rev0_50, __p1_50); \
-  __ret_50 = __builtin_shufflevector(__ret_50, __ret_50, 1, 0); \
-  __ret_50; \
-})
-#endif
-
-#define vcopy_lane_s64(__p0_51, __p1_51, __p2_51, __p3_51) __extension__ ({ \
-  int64x1_t __s0_51 = __p0_51; \
-  int64x1_t __s2_51 = __p2_51; \
-  int64x1_t __ret_51; \
-  __ret_51 = vset_lane_s64(vget_lane_s64(__s2_51, __p3_51), __s0_51, __p1_51); \
-  __ret_51; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_s16(__p0_52, __p1_52, __p2_52, __p3_52) __extension__ ({ \
-  int16x4_t __s0_52 = __p0_52; \
-  int16x4_t __s2_52 = __p2_52; \
-  int16x4_t __ret_52; \
-  __ret_52 = vset_lane_s16(vget_lane_s16(__s2_52, __p3_52), __s0_52, __p1_52); \
-  __ret_52; \
-})
-#else
-#define vcopy_lane_s16(__p0_53, __p1_53, __p2_53, __p3_53) __extension__ ({ \
-  int16x4_t __s0_53 = __p0_53; \
-  int16x4_t __s2_53 = __p2_53; \
-  int16x4_t __rev0_53;  __rev0_53 = __builtin_shufflevector(__s0_53, __s0_53, 3, 2, 1, 0); \
-  int16x4_t __rev2_53;  __rev2_53 = __builtin_shufflevector(__s2_53, __s2_53, 3, 2, 1, 0); \
-  int16x4_t __ret_53; \
-  __ret_53 = __noswap_vset_lane_s16(__noswap_vget_lane_s16(__rev2_53, __p3_53), __rev0_53, __p1_53); \
-  __ret_53 = __builtin_shufflevector(__ret_53, __ret_53, 3, 2, 1, 0); \
-  __ret_53; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_p8(__p0_54, __p1_54, __p2_54, __p3_54) __extension__ ({ \
-  poly8x16_t __s0_54 = __p0_54; \
-  poly8x16_t __s2_54 = __p2_54; \
-  poly8x16_t __ret_54; \
-  __ret_54 = vsetq_lane_p8(vgetq_lane_p8(__s2_54, __p3_54), __s0_54, __p1_54); \
-  __ret_54; \
-})
-#else
-#define vcopyq_laneq_p8(__p0_55, __p1_55, __p2_55, __p3_55) __extension__ ({ \
-  poly8x16_t __s0_55 = __p0_55; \
-  poly8x16_t __s2_55 = __p2_55; \
-  poly8x16_t __rev0_55;  __rev0_55 = __builtin_shufflevector(__s0_55, __s0_55, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev2_55;  __rev2_55 = __builtin_shufflevector(__s2_55, __s2_55, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret_55; \
-  __ret_55 = __noswap_vsetq_lane_p8(__noswap_vgetq_lane_p8(__rev2_55, __p3_55), __rev0_55, __p1_55); \
-  __ret_55 = __builtin_shufflevector(__ret_55, __ret_55, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_55; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_p16(__p0_56, __p1_56, __p2_56, __p3_56) __extension__ ({ \
-  poly16x8_t __s0_56 = __p0_56; \
-  poly16x8_t __s2_56 = __p2_56; \
-  poly16x8_t __ret_56; \
-  __ret_56 = vsetq_lane_p16(vgetq_lane_p16(__s2_56, __p3_56), __s0_56, __p1_56); \
-  __ret_56; \
-})
-#else
-#define vcopyq_laneq_p16(__p0_57, __p1_57, __p2_57, __p3_57) __extension__ ({ \
-  poly16x8_t __s0_57 = __p0_57; \
-  poly16x8_t __s2_57 = __p2_57; \
-  poly16x8_t __rev0_57;  __rev0_57 = __builtin_shufflevector(__s0_57, __s0_57, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __rev2_57;  __rev2_57 = __builtin_shufflevector(__s2_57, __s2_57, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret_57; \
-  __ret_57 = __noswap_vsetq_lane_p16(__noswap_vgetq_lane_p16(__rev2_57, __p3_57), __rev0_57, __p1_57); \
-  __ret_57 = __builtin_shufflevector(__ret_57, __ret_57, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_57; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u8(__p0_58, __p1_58, __p2_58, __p3_58) __extension__ ({ \
-  uint8x16_t __s0_58 = __p0_58; \
-  uint8x16_t __s2_58 = __p2_58; \
-  uint8x16_t __ret_58; \
-  __ret_58 = vsetq_lane_u8(vgetq_lane_u8(__s2_58, __p3_58), __s0_58, __p1_58); \
-  __ret_58; \
-})
-#else
-#define vcopyq_laneq_u8(__p0_59, __p1_59, __p2_59, __p3_59) __extension__ ({ \
-  uint8x16_t __s0_59 = __p0_59; \
-  uint8x16_t __s2_59 = __p2_59; \
-  uint8x16_t __rev0_59;  __rev0_59 = __builtin_shufflevector(__s0_59, __s0_59, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_59;  __rev2_59 = __builtin_shufflevector(__s2_59, __s2_59, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_59; \
-  __ret_59 = __noswap_vsetq_lane_u8(__noswap_vgetq_lane_u8(__rev2_59, __p3_59), __rev0_59, __p1_59); \
-  __ret_59 = __builtin_shufflevector(__ret_59, __ret_59, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_59; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u32(__p0_60, __p1_60, __p2_60, __p3_60) __extension__ ({ \
-  uint32x4_t __s0_60 = __p0_60; \
-  uint32x4_t __s2_60 = __p2_60; \
-  uint32x4_t __ret_60; \
-  __ret_60 = vsetq_lane_u32(vgetq_lane_u32(__s2_60, __p3_60), __s0_60, __p1_60); \
-  __ret_60; \
-})
-#else
-#define vcopyq_laneq_u32(__p0_61, __p1_61, __p2_61, __p3_61) __extension__ ({ \
-  uint32x4_t __s0_61 = __p0_61; \
-  uint32x4_t __s2_61 = __p2_61; \
-  uint32x4_t __rev0_61;  __rev0_61 = __builtin_shufflevector(__s0_61, __s0_61, 3, 2, 1, 0); \
-  uint32x4_t __rev2_61;  __rev2_61 = __builtin_shufflevector(__s2_61, __s2_61, 3, 2, 1, 0); \
-  uint32x4_t __ret_61; \
-  __ret_61 = __noswap_vsetq_lane_u32(__noswap_vgetq_lane_u32(__rev2_61, __p3_61), __rev0_61, __p1_61); \
-  __ret_61 = __builtin_shufflevector(__ret_61, __ret_61, 3, 2, 1, 0); \
-  __ret_61; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u64(__p0_62, __p1_62, __p2_62, __p3_62) __extension__ ({ \
-  uint64x2_t __s0_62 = __p0_62; \
-  uint64x2_t __s2_62 = __p2_62; \
-  uint64x2_t __ret_62; \
-  __ret_62 = vsetq_lane_u64(vgetq_lane_u64(__s2_62, __p3_62), __s0_62, __p1_62); \
-  __ret_62; \
-})
-#else
-#define vcopyq_laneq_u64(__p0_63, __p1_63, __p2_63, __p3_63) __extension__ ({ \
-  uint64x2_t __s0_63 = __p0_63; \
-  uint64x2_t __s2_63 = __p2_63; \
-  uint64x2_t __rev0_63;  __rev0_63 = __builtin_shufflevector(__s0_63, __s0_63, 1, 0); \
-  uint64x2_t __rev2_63;  __rev2_63 = __builtin_shufflevector(__s2_63, __s2_63, 1, 0); \
-  uint64x2_t __ret_63; \
-  __ret_63 = __noswap_vsetq_lane_u64(__noswap_vgetq_lane_u64(__rev2_63, __p3_63), __rev0_63, __p1_63); \
-  __ret_63 = __builtin_shufflevector(__ret_63, __ret_63, 1, 0); \
-  __ret_63; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u16(__p0_64, __p1_64, __p2_64, __p3_64) __extension__ ({ \
-  uint16x8_t __s0_64 = __p0_64; \
-  uint16x8_t __s2_64 = __p2_64; \
-  uint16x8_t __ret_64; \
-  __ret_64 = vsetq_lane_u16(vgetq_lane_u16(__s2_64, __p3_64), __s0_64, __p1_64); \
-  __ret_64; \
-})
-#else
-#define vcopyq_laneq_u16(__p0_65, __p1_65, __p2_65, __p3_65) __extension__ ({ \
-  uint16x8_t __s0_65 = __p0_65; \
-  uint16x8_t __s2_65 = __p2_65; \
-  uint16x8_t __rev0_65;  __rev0_65 = __builtin_shufflevector(__s0_65, __s0_65, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_65;  __rev2_65 = __builtin_shufflevector(__s2_65, __s2_65, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_65; \
-  __ret_65 = __noswap_vsetq_lane_u16(__noswap_vgetq_lane_u16(__rev2_65, __p3_65), __rev0_65, __p1_65); \
-  __ret_65 = __builtin_shufflevector(__ret_65, __ret_65, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_65; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s8(__p0_66, __p1_66, __p2_66, __p3_66) __extension__ ({ \
-  int8x16_t __s0_66 = __p0_66; \
-  int8x16_t __s2_66 = __p2_66; \
-  int8x16_t __ret_66; \
-  __ret_66 = vsetq_lane_s8(vgetq_lane_s8(__s2_66, __p3_66), __s0_66, __p1_66); \
-  __ret_66; \
-})
-#else
-#define vcopyq_laneq_s8(__p0_67, __p1_67, __p2_67, __p3_67) __extension__ ({ \
-  int8x16_t __s0_67 = __p0_67; \
-  int8x16_t __s2_67 = __p2_67; \
-  int8x16_t __rev0_67;  __rev0_67 = __builtin_shufflevector(__s0_67, __s0_67, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_67;  __rev2_67 = __builtin_shufflevector(__s2_67, __s2_67, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_67; \
-  __ret_67 = __noswap_vsetq_lane_s8(__noswap_vgetq_lane_s8(__rev2_67, __p3_67), __rev0_67, __p1_67); \
-  __ret_67 = __builtin_shufflevector(__ret_67, __ret_67, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_67; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_f32(__p0_68, __p1_68, __p2_68, __p3_68) __extension__ ({ \
-  float32x4_t __s0_68 = __p0_68; \
-  float32x4_t __s2_68 = __p2_68; \
-  float32x4_t __ret_68; \
-  __ret_68 = vsetq_lane_f32(vgetq_lane_f32(__s2_68, __p3_68), __s0_68, __p1_68); \
-  __ret_68; \
-})
-#else
-#define vcopyq_laneq_f32(__p0_69, __p1_69, __p2_69, __p3_69) __extension__ ({ \
-  float32x4_t __s0_69 = __p0_69; \
-  float32x4_t __s2_69 = __p2_69; \
-  float32x4_t __rev0_69;  __rev0_69 = __builtin_shufflevector(__s0_69, __s0_69, 3, 2, 1, 0); \
-  float32x4_t __rev2_69;  __rev2_69 = __builtin_shufflevector(__s2_69, __s2_69, 3, 2, 1, 0); \
-  float32x4_t __ret_69; \
-  __ret_69 = __noswap_vsetq_lane_f32(__noswap_vgetq_lane_f32(__rev2_69, __p3_69), __rev0_69, __p1_69); \
-  __ret_69 = __builtin_shufflevector(__ret_69, __ret_69, 3, 2, 1, 0); \
-  __ret_69; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s32(__p0_70, __p1_70, __p2_70, __p3_70) __extension__ ({ \
-  int32x4_t __s0_70 = __p0_70; \
-  int32x4_t __s2_70 = __p2_70; \
-  int32x4_t __ret_70; \
-  __ret_70 = vsetq_lane_s32(vgetq_lane_s32(__s2_70, __p3_70), __s0_70, __p1_70); \
-  __ret_70; \
-})
-#else
-#define vcopyq_laneq_s32(__p0_71, __p1_71, __p2_71, __p3_71) __extension__ ({ \
-  int32x4_t __s0_71 = __p0_71; \
-  int32x4_t __s2_71 = __p2_71; \
-  int32x4_t __rev0_71;  __rev0_71 = __builtin_shufflevector(__s0_71, __s0_71, 3, 2, 1, 0); \
-  int32x4_t __rev2_71;  __rev2_71 = __builtin_shufflevector(__s2_71, __s2_71, 3, 2, 1, 0); \
-  int32x4_t __ret_71; \
-  __ret_71 = __noswap_vsetq_lane_s32(__noswap_vgetq_lane_s32(__rev2_71, __p3_71), __rev0_71, __p1_71); \
-  __ret_71 = __builtin_shufflevector(__ret_71, __ret_71, 3, 2, 1, 0); \
-  __ret_71; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s64(__p0_72, __p1_72, __p2_72, __p3_72) __extension__ ({ \
-  int64x2_t __s0_72 = __p0_72; \
-  int64x2_t __s2_72 = __p2_72; \
-  int64x2_t __ret_72; \
-  __ret_72 = vsetq_lane_s64(vgetq_lane_s64(__s2_72, __p3_72), __s0_72, __p1_72); \
-  __ret_72; \
-})
-#else
-#define vcopyq_laneq_s64(__p0_73, __p1_73, __p2_73, __p3_73) __extension__ ({ \
-  int64x2_t __s0_73 = __p0_73; \
-  int64x2_t __s2_73 = __p2_73; \
-  int64x2_t __rev0_73;  __rev0_73 = __builtin_shufflevector(__s0_73, __s0_73, 1, 0); \
-  int64x2_t __rev2_73;  __rev2_73 = __builtin_shufflevector(__s2_73, __s2_73, 1, 0); \
-  int64x2_t __ret_73; \
-  __ret_73 = __noswap_vsetq_lane_s64(__noswap_vgetq_lane_s64(__rev2_73, __p3_73), __rev0_73, __p1_73); \
-  __ret_73 = __builtin_shufflevector(__ret_73, __ret_73, 1, 0); \
-  __ret_73; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s16(__p0_74, __p1_74, __p2_74, __p3_74) __extension__ ({ \
-  int16x8_t __s0_74 = __p0_74; \
-  int16x8_t __s2_74 = __p2_74; \
-  int16x8_t __ret_74; \
-  __ret_74 = vsetq_lane_s16(vgetq_lane_s16(__s2_74, __p3_74), __s0_74, __p1_74); \
-  __ret_74; \
-})
-#else
-#define vcopyq_laneq_s16(__p0_75, __p1_75, __p2_75, __p3_75) __extension__ ({ \
-  int16x8_t __s0_75 = __p0_75; \
-  int16x8_t __s2_75 = __p2_75; \
-  int16x8_t __rev0_75;  __rev0_75 = __builtin_shufflevector(__s0_75, __s0_75, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_75;  __rev2_75 = __builtin_shufflevector(__s2_75, __s2_75, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_75; \
-  __ret_75 = __noswap_vsetq_lane_s16(__noswap_vgetq_lane_s16(__rev2_75, __p3_75), __rev0_75, __p1_75); \
-  __ret_75 = __builtin_shufflevector(__ret_75, __ret_75, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_75; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_p8(__p0_76, __p1_76, __p2_76, __p3_76) __extension__ ({ \
-  poly8x8_t __s0_76 = __p0_76; \
-  poly8x16_t __s2_76 = __p2_76; \
-  poly8x8_t __ret_76; \
-  __ret_76 = vset_lane_p8(vgetq_lane_p8(__s2_76, __p3_76), __s0_76, __p1_76); \
-  __ret_76; \
-})
-#else
-#define vcopy_laneq_p8(__p0_77, __p1_77, __p2_77, __p3_77) __extension__ ({ \
-  poly8x8_t __s0_77 = __p0_77; \
-  poly8x16_t __s2_77 = __p2_77; \
-  poly8x8_t __rev0_77;  __rev0_77 = __builtin_shufflevector(__s0_77, __s0_77, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev2_77;  __rev2_77 = __builtin_shufflevector(__s2_77, __s2_77, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret_77; \
-  __ret_77 = __noswap_vset_lane_p8(__noswap_vgetq_lane_p8(__rev2_77, __p3_77), __rev0_77, __p1_77); \
-  __ret_77 = __builtin_shufflevector(__ret_77, __ret_77, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_77; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_p16(__p0_78, __p1_78, __p2_78, __p3_78) __extension__ ({ \
-  poly16x4_t __s0_78 = __p0_78; \
-  poly16x8_t __s2_78 = __p2_78; \
-  poly16x4_t __ret_78; \
-  __ret_78 = vset_lane_p16(vgetq_lane_p16(__s2_78, __p3_78), __s0_78, __p1_78); \
-  __ret_78; \
-})
-#else
-#define vcopy_laneq_p16(__p0_79, __p1_79, __p2_79, __p3_79) __extension__ ({ \
-  poly16x4_t __s0_79 = __p0_79; \
-  poly16x8_t __s2_79 = __p2_79; \
-  poly16x4_t __rev0_79;  __rev0_79 = __builtin_shufflevector(__s0_79, __s0_79, 3, 2, 1, 0); \
-  poly16x8_t __rev2_79;  __rev2_79 = __builtin_shufflevector(__s2_79, __s2_79, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __ret_79; \
-  __ret_79 = __noswap_vset_lane_p16(__noswap_vgetq_lane_p16(__rev2_79, __p3_79), __rev0_79, __p1_79); \
-  __ret_79 = __builtin_shufflevector(__ret_79, __ret_79, 3, 2, 1, 0); \
-  __ret_79; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u8(__p0_80, __p1_80, __p2_80, __p3_80) __extension__ ({ \
-  uint8x8_t __s0_80 = __p0_80; \
-  uint8x16_t __s2_80 = __p2_80; \
-  uint8x8_t __ret_80; \
-  __ret_80 = vset_lane_u8(vgetq_lane_u8(__s2_80, __p3_80), __s0_80, __p1_80); \
-  __ret_80; \
-})
-#else
-#define vcopy_laneq_u8(__p0_81, __p1_81, __p2_81, __p3_81) __extension__ ({ \
-  uint8x8_t __s0_81 = __p0_81; \
-  uint8x16_t __s2_81 = __p2_81; \
-  uint8x8_t __rev0_81;  __rev0_81 = __builtin_shufflevector(__s0_81, __s0_81, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_81;  __rev2_81 = __builtin_shufflevector(__s2_81, __s2_81, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret_81; \
-  __ret_81 = __noswap_vset_lane_u8(__noswap_vgetq_lane_u8(__rev2_81, __p3_81), __rev0_81, __p1_81); \
-  __ret_81 = __builtin_shufflevector(__ret_81, __ret_81, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_81; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u32(__p0_82, __p1_82, __p2_82, __p3_82) __extension__ ({ \
-  uint32x2_t __s0_82 = __p0_82; \
-  uint32x4_t __s2_82 = __p2_82; \
-  uint32x2_t __ret_82; \
-  __ret_82 = vset_lane_u32(vgetq_lane_u32(__s2_82, __p3_82), __s0_82, __p1_82); \
-  __ret_82; \
-})
-#else
-#define vcopy_laneq_u32(__p0_83, __p1_83, __p2_83, __p3_83) __extension__ ({ \
-  uint32x2_t __s0_83 = __p0_83; \
-  uint32x4_t __s2_83 = __p2_83; \
-  uint32x2_t __rev0_83;  __rev0_83 = __builtin_shufflevector(__s0_83, __s0_83, 1, 0); \
-  uint32x4_t __rev2_83;  __rev2_83 = __builtin_shufflevector(__s2_83, __s2_83, 3, 2, 1, 0); \
-  uint32x2_t __ret_83; \
-  __ret_83 = __noswap_vset_lane_u32(__noswap_vgetq_lane_u32(__rev2_83, __p3_83), __rev0_83, __p1_83); \
-  __ret_83 = __builtin_shufflevector(__ret_83, __ret_83, 1, 0); \
-  __ret_83; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u64(__p0_84, __p1_84, __p2_84, __p3_84) __extension__ ({ \
-  uint64x1_t __s0_84 = __p0_84; \
-  uint64x2_t __s2_84 = __p2_84; \
-  uint64x1_t __ret_84; \
-  __ret_84 = vset_lane_u64(vgetq_lane_u64(__s2_84, __p3_84), __s0_84, __p1_84); \
-  __ret_84; \
-})
-#else
-#define vcopy_laneq_u64(__p0_85, __p1_85, __p2_85, __p3_85) __extension__ ({ \
-  uint64x1_t __s0_85 = __p0_85; \
-  uint64x2_t __s2_85 = __p2_85; \
-  uint64x2_t __rev2_85;  __rev2_85 = __builtin_shufflevector(__s2_85, __s2_85, 1, 0); \
-  uint64x1_t __ret_85; \
-  __ret_85 = vset_lane_u64(__noswap_vgetq_lane_u64(__rev2_85, __p3_85), __s0_85, __p1_85); \
-  __ret_85; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u16(__p0_86, __p1_86, __p2_86, __p3_86) __extension__ ({ \
-  uint16x4_t __s0_86 = __p0_86; \
-  uint16x8_t __s2_86 = __p2_86; \
-  uint16x4_t __ret_86; \
-  __ret_86 = vset_lane_u16(vgetq_lane_u16(__s2_86, __p3_86), __s0_86, __p1_86); \
-  __ret_86; \
-})
-#else
-#define vcopy_laneq_u16(__p0_87, __p1_87, __p2_87, __p3_87) __extension__ ({ \
-  uint16x4_t __s0_87 = __p0_87; \
-  uint16x8_t __s2_87 = __p2_87; \
-  uint16x4_t __rev0_87;  __rev0_87 = __builtin_shufflevector(__s0_87, __s0_87, 3, 2, 1, 0); \
-  uint16x8_t __rev2_87;  __rev2_87 = __builtin_shufflevector(__s2_87, __s2_87, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_87; \
-  __ret_87 = __noswap_vset_lane_u16(__noswap_vgetq_lane_u16(__rev2_87, __p3_87), __rev0_87, __p1_87); \
-  __ret_87 = __builtin_shufflevector(__ret_87, __ret_87, 3, 2, 1, 0); \
-  __ret_87; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s8(__p0_88, __p1_88, __p2_88, __p3_88) __extension__ ({ \
-  int8x8_t __s0_88 = __p0_88; \
-  int8x16_t __s2_88 = __p2_88; \
-  int8x8_t __ret_88; \
-  __ret_88 = vset_lane_s8(vgetq_lane_s8(__s2_88, __p3_88), __s0_88, __p1_88); \
-  __ret_88; \
-})
-#else
-#define vcopy_laneq_s8(__p0_89, __p1_89, __p2_89, __p3_89) __extension__ ({ \
-  int8x8_t __s0_89 = __p0_89; \
-  int8x16_t __s2_89 = __p2_89; \
-  int8x8_t __rev0_89;  __rev0_89 = __builtin_shufflevector(__s0_89, __s0_89, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_89;  __rev2_89 = __builtin_shufflevector(__s2_89, __s2_89, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret_89; \
-  __ret_89 = __noswap_vset_lane_s8(__noswap_vgetq_lane_s8(__rev2_89, __p3_89), __rev0_89, __p1_89); \
-  __ret_89 = __builtin_shufflevector(__ret_89, __ret_89, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_89; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_f32(__p0_90, __p1_90, __p2_90, __p3_90) __extension__ ({ \
-  float32x2_t __s0_90 = __p0_90; \
-  float32x4_t __s2_90 = __p2_90; \
-  float32x2_t __ret_90; \
-  __ret_90 = vset_lane_f32(vgetq_lane_f32(__s2_90, __p3_90), __s0_90, __p1_90); \
-  __ret_90; \
-})
-#else
-#define vcopy_laneq_f32(__p0_91, __p1_91, __p2_91, __p3_91) __extension__ ({ \
-  float32x2_t __s0_91 = __p0_91; \
-  float32x4_t __s2_91 = __p2_91; \
-  float32x2_t __rev0_91;  __rev0_91 = __builtin_shufflevector(__s0_91, __s0_91, 1, 0); \
-  float32x4_t __rev2_91;  __rev2_91 = __builtin_shufflevector(__s2_91, __s2_91, 3, 2, 1, 0); \
-  float32x2_t __ret_91; \
-  __ret_91 = __noswap_vset_lane_f32(__noswap_vgetq_lane_f32(__rev2_91, __p3_91), __rev0_91, __p1_91); \
-  __ret_91 = __builtin_shufflevector(__ret_91, __ret_91, 1, 0); \
-  __ret_91; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s32(__p0_92, __p1_92, __p2_92, __p3_92) __extension__ ({ \
-  int32x2_t __s0_92 = __p0_92; \
-  int32x4_t __s2_92 = __p2_92; \
-  int32x2_t __ret_92; \
-  __ret_92 = vset_lane_s32(vgetq_lane_s32(__s2_92, __p3_92), __s0_92, __p1_92); \
-  __ret_92; \
-})
-#else
-#define vcopy_laneq_s32(__p0_93, __p1_93, __p2_93, __p3_93) __extension__ ({ \
-  int32x2_t __s0_93 = __p0_93; \
-  int32x4_t __s2_93 = __p2_93; \
-  int32x2_t __rev0_93;  __rev0_93 = __builtin_shufflevector(__s0_93, __s0_93, 1, 0); \
-  int32x4_t __rev2_93;  __rev2_93 = __builtin_shufflevector(__s2_93, __s2_93, 3, 2, 1, 0); \
-  int32x2_t __ret_93; \
-  __ret_93 = __noswap_vset_lane_s32(__noswap_vgetq_lane_s32(__rev2_93, __p3_93), __rev0_93, __p1_93); \
-  __ret_93 = __builtin_shufflevector(__ret_93, __ret_93, 1, 0); \
-  __ret_93; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s64(__p0_94, __p1_94, __p2_94, __p3_94) __extension__ ({ \
-  int64x1_t __s0_94 = __p0_94; \
-  int64x2_t __s2_94 = __p2_94; \
-  int64x1_t __ret_94; \
-  __ret_94 = vset_lane_s64(vgetq_lane_s64(__s2_94, __p3_94), __s0_94, __p1_94); \
-  __ret_94; \
-})
-#else
-#define vcopy_laneq_s64(__p0_95, __p1_95, __p2_95, __p3_95) __extension__ ({ \
-  int64x1_t __s0_95 = __p0_95; \
-  int64x2_t __s2_95 = __p2_95; \
-  int64x2_t __rev2_95;  __rev2_95 = __builtin_shufflevector(__s2_95, __s2_95, 1, 0); \
-  int64x1_t __ret_95; \
-  __ret_95 = vset_lane_s64(__noswap_vgetq_lane_s64(__rev2_95, __p3_95), __s0_95, __p1_95); \
-  __ret_95; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s16(__p0_96, __p1_96, __p2_96, __p3_96) __extension__ ({ \
-  int16x4_t __s0_96 = __p0_96; \
-  int16x8_t __s2_96 = __p2_96; \
-  int16x4_t __ret_96; \
-  __ret_96 = vset_lane_s16(vgetq_lane_s16(__s2_96, __p3_96), __s0_96, __p1_96); \
-  __ret_96; \
-})
-#else
-#define vcopy_laneq_s16(__p0_97, __p1_97, __p2_97, __p3_97) __extension__ ({ \
-  int16x4_t __s0_97 = __p0_97; \
-  int16x8_t __s2_97 = __p2_97; \
-  int16x4_t __rev0_97;  __rev0_97 = __builtin_shufflevector(__s0_97, __s0_97, 3, 2, 1, 0); \
-  int16x8_t __rev2_97;  __rev2_97 = __builtin_shufflevector(__s2_97, __s2_97, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_97; \
-  __ret_97 = __noswap_vset_lane_s16(__noswap_vgetq_lane_s16(__rev2_97, __p3_97), __rev0_97, __p1_97); \
-  __ret_97 = __builtin_shufflevector(__ret_97, __ret_97, 3, 2, 1, 0); \
-  __ret_97; \
-})
-#endif
-
-__ai poly64x1_t vcreate_p64(uint64_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vcreate_f64(uint64_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float32_t vcvts_f32_s32(int32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vcvts_f32_s32(__p0);
-  return __ret;
-}
-__ai float32_t vcvts_f32_u32(uint32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vcvts_f32_u32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcvt_f32_f64(float64x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vcvt_f32_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vcvt_f32_f64(float64x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9);
-  return __ret;
-}
-#endif
-
-__ai float64_t vcvtd_f64_s64(int64_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vcvtd_f64_s64(__p0);
-  return __ret;
-}
-__ai float64_t vcvtd_f64_u64(uint64_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vcvtd_f64_u64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vcvt_f64_u64(uint64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-__ai float64x1_t vcvt_f64_s64(int64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcvt_f64_f32(float32x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vcvt_f64_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vcvt_f64_f32(float32x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) {
-  float16x8_t __ret;
-  __ret = vcombine_f16(__p0, vcvt_f16_f32(__p1));
-  return __ret;
-}
-#else
-__ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __noswap_vcombine_f16(__rev0, __noswap_vcvt_f16_f32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = vcvt_f32_f16(vget_high_f16(__p0));
-  return __ret;
-}
-#else
-__ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vcvt_f32_f16(__noswap_vget_high_f16(__rev0));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
-  float32x4_t __ret;
-  __ret = vcombine_f32(__p0, vcvt_f32_f64(__p1));
-  return __ret;
-}
-#else
-__ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvt_f32_f64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) {
-  float64x2_t __ret;
-  __ret = vcvt_f64_f32(vget_high_f32(__p0));
-  return __ret;
-}
-#else
-__ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float64x2_t __ret;
-  __ret = __noswap_vcvt_f64_f32(__noswap_vget_high_f32(__rev0));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#define vcvts_n_f32_u32(__p0, __p1) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vcvts_n_f32_u32(__s0, __p1); \
-  __ret; \
-})
-#define vcvts_n_f32_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vcvts_n_f32_s32(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vcvt_n_f64_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#define vcvt_n_f64_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#define vcvtd_n_f64_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vcvtd_n_f64_u64(__s0, __p1); \
-  __ret; \
-})
-#define vcvtd_n_f64_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vcvtd_n_f64_s64(__s0, __p1); \
-  __ret; \
-})
-#define vcvts_n_s32_f32(__p0, __p1) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vcvts_n_s32_f32(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vcvt_n_s64_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vcvt_n_s64_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#define vcvtd_n_s64_f64(__p0, __p1) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vcvtd_n_s64_f64(__s0, __p1); \
-  __ret; \
-})
-#define vcvts_n_u32_f32(__p0, __p1) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vcvts_n_u32_f32(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vcvt_n_u64_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vcvt_n_u64_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#define vcvtd_n_u64_f64(__p0, __p1) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vcvtd_n_u64_f64(__s0, __p1); \
-  __ret; \
-})
-__ai int32_t vcvts_s32_f32(float32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vcvts_s32_f32(__p0);
-  return __ret;
-}
-__ai int64_t vcvtd_s64_f64(float64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcvtd_s64_f64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vcvt_s64_f64(float64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vcvt_s64_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-__ai uint32_t vcvts_u32_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcvts_u32_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vcvtd_u64_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcvtd_u64_f64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcvt_u64_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcvt_u64_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-__ai int32_t vcvtas_s32_f32(float32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vcvtas_s32_f32(__p0);
-  return __ret;
-}
-__ai int64_t vcvtad_s64_f64(float64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcvtad_s64_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcvtas_u32_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcvtas_u32_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vcvtad_u64_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcvtad_u64_f64(__p0);
-  return __ret;
-}
-__ai int32_t vcvtms_s32_f32(float32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vcvtms_s32_f32(__p0);
-  return __ret;
-}
-__ai int64_t vcvtmd_s64_f64(float64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcvtmd_s64_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcvtms_u32_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcvtms_u32_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vcvtmd_u64_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcvtmd_u64_f64(__p0);
-  return __ret;
-}
-__ai int32_t vcvtns_s32_f32(float32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vcvtns_s32_f32(__p0);
-  return __ret;
-}
-__ai int64_t vcvtnd_s64_f64(float64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcvtnd_s64_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcvtns_u32_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcvtns_u32_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vcvtnd_u64_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcvtnd_u64_f64(__p0);
-  return __ret;
-}
-__ai int32_t vcvtps_s32_f32(float32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vcvtps_s32_f32(__p0);
-  return __ret;
-}
-__ai int64_t vcvtpd_s64_f64(float64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcvtpd_s64_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcvtps_u32_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcvtps_u32_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vcvtpd_u64_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcvtpd_u64_f64(__p0);
-  return __ret;
-}
-__ai float32_t vcvtxd_f32_f64(float64_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vcvtxd_f32_f64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vcvtx_f32_f64(float64x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
-  float32x4_t __ret;
-  __ret = vcombine_f32(__p0, vcvtx_f32_f64(__p1));
-  return __ret;
-}
-#else
-__ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvtx_f32_f64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#else
-__ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 / __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#else
-__ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 / __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vdiv_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#else
-__ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 / __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdups_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vdupd_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vdupd_lane_i64((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vduph_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vdupd_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vdupd_lane_f64((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vdups_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_lane_f32((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_lane_f32((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdups_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vdupd_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vdupd_lane_i64((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vduph_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vdup_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x1_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vdup_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_laneq_f32((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_laneq_f32((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x1_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1); \
-  __ret; \
-})
-#else
-#define vdup_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64x1_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdupq_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1); \
-  __ret; \
-})
-#else
-#define vdup_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x1_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x1_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1); \
-  __ret; \
-})
-#else
-#define vdup_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x1_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1); \
-  __ret; \
-})
-#else
-#define vdup_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x1_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
-  __ret; \
-})
-#else
-#define vdup_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-__ai poly64x1_t vdup_n_p64(poly64_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vdupq_n_p64(poly64_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai poly64x2_t vdupq_n_p64(poly64_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vdupq_n_f64(float64_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai float64x2_t vdupq_n_f64(float64_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vdup_n_f64(float64_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) {__p0};
-  return __ret;
-}
-#define vext_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x1_t __s1 = __p1; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
-  __ret; \
-})
-#else
-#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 42); \
-  __ret; \
-})
-#else
-#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vext_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
-  return __ret;
-}
-#define vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64_t __s1 = __p1; \
-  float64x1_t __s2 = __p2; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (int8x8_t)__s2, __p3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (int8x8_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (int8x8_t)__rev2, __p3); \
-  __ret; \
-})
-#define __noswap_vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (int8x8_t)__s2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x1_t __s2 = __p2; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \
-  __ret; \
-})
-#else
-#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x1_t __s2 = __p2; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__s2, __p3, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x1_t __s2 = __p2; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \
-  __ret; \
-})
-#else
-#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \
-  __ret; \
-})
-#endif
-
-#define vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x1_t __s2 = __p2; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \
-  __ret; \
-})
-#else
-#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (int8x16_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (int8x16_t)__rev2, __p3); \
-  __ret; \
-})
-#define __noswap_vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (int8x16_t)__s2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (int8x16_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (int8x16_t)__rev2, __p3); \
-  __ret; \
-})
-#define __noswap_vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (int8x16_t)__s2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \
-  __ret; \
-})
-#else
-#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \
-  __ret; \
-})
-#else
-#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \
-  __ret; \
-})
-#else
-#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__rev2, __p3, 10); \
-  __ret; \
-})
-#define __noswap_vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \
-  __ret; \
-})
-#else
-#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __ret;
-  __ret = vfmaq_f64(__p0, __p1, (float64x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __noswap_vfmaq_f64(__rev0, __rev1, (float64x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vfma_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) {
-  float64x1_t __ret;
-  __ret = vfma_f64(__p0, __p1, (float64x1_t) {__p2});
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = vfmaq_f64(__p0, -__p1, __p2);
-  return __ret;
-}
-#else
-__ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = __noswap_vfmaq_f64(__rev0, -__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vfms_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = vfma_f64(__p0, -__p1, __p2);
-  return __ret;
-}
-#define vfmsd_lane_f64(__p0_98, __p1_98, __p2_98, __p3_98) __extension__ ({ \
-  float64_t __s0_98 = __p0_98; \
-  float64_t __s1_98 = __p1_98; \
-  float64x1_t __s2_98 = __p2_98; \
-  float64_t __ret_98; \
-  __ret_98 = vfmad_lane_f64(__s0_98, -__s1_98, __s2_98, __p3_98); \
-  __ret_98; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vfmss_lane_f32(__p0_99, __p1_99, __p2_99, __p3_99) __extension__ ({ \
-  float32_t __s0_99 = __p0_99; \
-  float32_t __s1_99 = __p1_99; \
-  float32x2_t __s2_99 = __p2_99; \
-  float32_t __ret_99; \
-  __ret_99 = vfmas_lane_f32(__s0_99, -__s1_99, __s2_99, __p3_99); \
-  __ret_99; \
-})
-#else
-#define vfmss_lane_f32(__p0_100, __p1_100, __p2_100, __p3_100) __extension__ ({ \
-  float32_t __s0_100 = __p0_100; \
-  float32_t __s1_100 = __p1_100; \
-  float32x2_t __s2_100 = __p2_100; \
-  float32x2_t __rev2_100;  __rev2_100 = __builtin_shufflevector(__s2_100, __s2_100, 1, 0); \
-  float32_t __ret_100; \
-  __ret_100 = __noswap_vfmas_lane_f32(__s0_100, -__s1_100, __rev2_100, __p3_100); \
-  __ret_100; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_lane_f64(__p0_101, __p1_101, __p2_101, __p3_101) __extension__ ({ \
-  float64x2_t __s0_101 = __p0_101; \
-  float64x2_t __s1_101 = __p1_101; \
-  float64x1_t __s2_101 = __p2_101; \
-  float64x2_t __ret_101; \
-  __ret_101 = vfmaq_lane_f64(__s0_101, -__s1_101, __s2_101, __p3_101); \
-  __ret_101; \
-})
-#else
-#define vfmsq_lane_f64(__p0_102, __p1_102, __p2_102, __p3_102) __extension__ ({ \
-  float64x2_t __s0_102 = __p0_102; \
-  float64x2_t __s1_102 = __p1_102; \
-  float64x1_t __s2_102 = __p2_102; \
-  float64x2_t __rev0_102;  __rev0_102 = __builtin_shufflevector(__s0_102, __s0_102, 1, 0); \
-  float64x2_t __rev1_102;  __rev1_102 = __builtin_shufflevector(__s1_102, __s1_102, 1, 0); \
-  float64x2_t __ret_102; \
-  __ret_102 = __noswap_vfmaq_lane_f64(__rev0_102, -__rev1_102, __s2_102, __p3_102); \
-  __ret_102 = __builtin_shufflevector(__ret_102, __ret_102, 1, 0); \
-  __ret_102; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_lane_f32(__p0_103, __p1_103, __p2_103, __p3_103) __extension__ ({ \
-  float32x4_t __s0_103 = __p0_103; \
-  float32x4_t __s1_103 = __p1_103; \
-  float32x2_t __s2_103 = __p2_103; \
-  float32x4_t __ret_103; \
-  __ret_103 = vfmaq_lane_f32(__s0_103, -__s1_103, __s2_103, __p3_103); \
-  __ret_103; \
-})
-#else
-#define vfmsq_lane_f32(__p0_104, __p1_104, __p2_104, __p3_104) __extension__ ({ \
-  float32x4_t __s0_104 = __p0_104; \
-  float32x4_t __s1_104 = __p1_104; \
-  float32x2_t __s2_104 = __p2_104; \
-  float32x4_t __rev0_104;  __rev0_104 = __builtin_shufflevector(__s0_104, __s0_104, 3, 2, 1, 0); \
-  float32x4_t __rev1_104;  __rev1_104 = __builtin_shufflevector(__s1_104, __s1_104, 3, 2, 1, 0); \
-  float32x2_t __rev2_104;  __rev2_104 = __builtin_shufflevector(__s2_104, __s2_104, 1, 0); \
-  float32x4_t __ret_104; \
-  __ret_104 = __noswap_vfmaq_lane_f32(__rev0_104, -__rev1_104, __rev2_104, __p3_104); \
-  __ret_104 = __builtin_shufflevector(__ret_104, __ret_104, 3, 2, 1, 0); \
-  __ret_104; \
-})
-#endif
-
-#define vfms_lane_f64(__p0_105, __p1_105, __p2_105, __p3_105) __extension__ ({ \
-  float64x1_t __s0_105 = __p0_105; \
-  float64x1_t __s1_105 = __p1_105; \
-  float64x1_t __s2_105 = __p2_105; \
-  float64x1_t __ret_105; \
-  __ret_105 = vfma_lane_f64(__s0_105, -__s1_105, __s2_105, __p3_105); \
-  __ret_105; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vfms_lane_f32(__p0_106, __p1_106, __p2_106, __p3_106) __extension__ ({ \
-  float32x2_t __s0_106 = __p0_106; \
-  float32x2_t __s1_106 = __p1_106; \
-  float32x2_t __s2_106 = __p2_106; \
-  float32x2_t __ret_106; \
-  __ret_106 = vfma_lane_f32(__s0_106, -__s1_106, __s2_106, __p3_106); \
-  __ret_106; \
-})
-#else
-#define vfms_lane_f32(__p0_107, __p1_107, __p2_107, __p3_107) __extension__ ({ \
-  float32x2_t __s0_107 = __p0_107; \
-  float32x2_t __s1_107 = __p1_107; \
-  float32x2_t __s2_107 = __p2_107; \
-  float32x2_t __rev0_107;  __rev0_107 = __builtin_shufflevector(__s0_107, __s0_107, 1, 0); \
-  float32x2_t __rev1_107;  __rev1_107 = __builtin_shufflevector(__s1_107, __s1_107, 1, 0); \
-  float32x2_t __rev2_107;  __rev2_107 = __builtin_shufflevector(__s2_107, __s2_107, 1, 0); \
-  float32x2_t __ret_107; \
-  __ret_107 = __noswap_vfma_lane_f32(__rev0_107, -__rev1_107, __rev2_107, __p3_107); \
-  __ret_107 = __builtin_shufflevector(__ret_107, __ret_107, 1, 0); \
-  __ret_107; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsd_laneq_f64(__p0_108, __p1_108, __p2_108, __p3_108) __extension__ ({ \
-  float64_t __s0_108 = __p0_108; \
-  float64_t __s1_108 = __p1_108; \
-  float64x2_t __s2_108 = __p2_108; \
-  float64_t __ret_108; \
-  __ret_108 = vfmad_laneq_f64(__s0_108, -__s1_108, __s2_108, __p3_108); \
-  __ret_108; \
-})
-#else
-#define vfmsd_laneq_f64(__p0_109, __p1_109, __p2_109, __p3_109) __extension__ ({ \
-  float64_t __s0_109 = __p0_109; \
-  float64_t __s1_109 = __p1_109; \
-  float64x2_t __s2_109 = __p2_109; \
-  float64x2_t __rev2_109;  __rev2_109 = __builtin_shufflevector(__s2_109, __s2_109, 1, 0); \
-  float64_t __ret_109; \
-  __ret_109 = __noswap_vfmad_laneq_f64(__s0_109, -__s1_109, __rev2_109, __p3_109); \
-  __ret_109; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmss_laneq_f32(__p0_110, __p1_110, __p2_110, __p3_110) __extension__ ({ \
-  float32_t __s0_110 = __p0_110; \
-  float32_t __s1_110 = __p1_110; \
-  float32x4_t __s2_110 = __p2_110; \
-  float32_t __ret_110; \
-  __ret_110 = vfmas_laneq_f32(__s0_110, -__s1_110, __s2_110, __p3_110); \
-  __ret_110; \
-})
-#else
-#define vfmss_laneq_f32(__p0_111, __p1_111, __p2_111, __p3_111) __extension__ ({ \
-  float32_t __s0_111 = __p0_111; \
-  float32_t __s1_111 = __p1_111; \
-  float32x4_t __s2_111 = __p2_111; \
-  float32x4_t __rev2_111;  __rev2_111 = __builtin_shufflevector(__s2_111, __s2_111, 3, 2, 1, 0); \
-  float32_t __ret_111; \
-  __ret_111 = __noswap_vfmas_laneq_f32(__s0_111, -__s1_111, __rev2_111, __p3_111); \
-  __ret_111; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_laneq_f64(__p0_112, __p1_112, __p2_112, __p3_112) __extension__ ({ \
-  float64x2_t __s0_112 = __p0_112; \
-  float64x2_t __s1_112 = __p1_112; \
-  float64x2_t __s2_112 = __p2_112; \
-  float64x2_t __ret_112; \
-  __ret_112 = vfmaq_laneq_f64(__s0_112, -__s1_112, __s2_112, __p3_112); \
-  __ret_112; \
-})
-#else
-#define vfmsq_laneq_f64(__p0_113, __p1_113, __p2_113, __p3_113) __extension__ ({ \
-  float64x2_t __s0_113 = __p0_113; \
-  float64x2_t __s1_113 = __p1_113; \
-  float64x2_t __s2_113 = __p2_113; \
-  float64x2_t __rev0_113;  __rev0_113 = __builtin_shufflevector(__s0_113, __s0_113, 1, 0); \
-  float64x2_t __rev1_113;  __rev1_113 = __builtin_shufflevector(__s1_113, __s1_113, 1, 0); \
-  float64x2_t __rev2_113;  __rev2_113 = __builtin_shufflevector(__s2_113, __s2_113, 1, 0); \
-  float64x2_t __ret_113; \
-  __ret_113 = __noswap_vfmaq_laneq_f64(__rev0_113, -__rev1_113, __rev2_113, __p3_113); \
-  __ret_113 = __builtin_shufflevector(__ret_113, __ret_113, 1, 0); \
-  __ret_113; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_laneq_f32(__p0_114, __p1_114, __p2_114, __p3_114) __extension__ ({ \
-  float32x4_t __s0_114 = __p0_114; \
-  float32x4_t __s1_114 = __p1_114; \
-  float32x4_t __s2_114 = __p2_114; \
-  float32x4_t __ret_114; \
-  __ret_114 = vfmaq_laneq_f32(__s0_114, -__s1_114, __s2_114, __p3_114); \
-  __ret_114; \
-})
-#else
-#define vfmsq_laneq_f32(__p0_115, __p1_115, __p2_115, __p3_115) __extension__ ({ \
-  float32x4_t __s0_115 = __p0_115; \
-  float32x4_t __s1_115 = __p1_115; \
-  float32x4_t __s2_115 = __p2_115; \
-  float32x4_t __rev0_115;  __rev0_115 = __builtin_shufflevector(__s0_115, __s0_115, 3, 2, 1, 0); \
-  float32x4_t __rev1_115;  __rev1_115 = __builtin_shufflevector(__s1_115, __s1_115, 3, 2, 1, 0); \
-  float32x4_t __rev2_115;  __rev2_115 = __builtin_shufflevector(__s2_115, __s2_115, 3, 2, 1, 0); \
-  float32x4_t __ret_115; \
-  __ret_115 = __noswap_vfmaq_laneq_f32(__rev0_115, -__rev1_115, __rev2_115, __p3_115); \
-  __ret_115 = __builtin_shufflevector(__ret_115, __ret_115, 3, 2, 1, 0); \
-  __ret_115; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_laneq_f64(__p0_116, __p1_116, __p2_116, __p3_116) __extension__ ({ \
-  float64x1_t __s0_116 = __p0_116; \
-  float64x1_t __s1_116 = __p1_116; \
-  float64x2_t __s2_116 = __p2_116; \
-  float64x1_t __ret_116; \
-  __ret_116 = vfma_laneq_f64(__s0_116, -__s1_116, __s2_116, __p3_116); \
-  __ret_116; \
-})
-#else
-#define vfms_laneq_f64(__p0_117, __p1_117, __p2_117, __p3_117) __extension__ ({ \
-  float64x1_t __s0_117 = __p0_117; \
-  float64x1_t __s1_117 = __p1_117; \
-  float64x2_t __s2_117 = __p2_117; \
-  float64x2_t __rev2_117;  __rev2_117 = __builtin_shufflevector(__s2_117, __s2_117, 1, 0); \
-  float64x1_t __ret_117; \
-  __ret_117 = __noswap_vfma_laneq_f64(__s0_117, -__s1_117, __rev2_117, __p3_117); \
-  __ret_117; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_laneq_f32(__p0_118, __p1_118, __p2_118, __p3_118) __extension__ ({ \
-  float32x2_t __s0_118 = __p0_118; \
-  float32x2_t __s1_118 = __p1_118; \
-  float32x4_t __s2_118 = __p2_118; \
-  float32x2_t __ret_118; \
-  __ret_118 = vfma_laneq_f32(__s0_118, -__s1_118, __s2_118, __p3_118); \
-  __ret_118; \
-})
-#else
-#define vfms_laneq_f32(__p0_119, __p1_119, __p2_119, __p3_119) __extension__ ({ \
-  float32x2_t __s0_119 = __p0_119; \
-  float32x2_t __s1_119 = __p1_119; \
-  float32x4_t __s2_119 = __p2_119; \
-  float32x2_t __rev0_119;  __rev0_119 = __builtin_shufflevector(__s0_119, __s0_119, 1, 0); \
-  float32x2_t __rev1_119;  __rev1_119 = __builtin_shufflevector(__s1_119, __s1_119, 1, 0); \
-  float32x4_t __rev2_119;  __rev2_119 = __builtin_shufflevector(__s2_119, __s2_119, 3, 2, 1, 0); \
-  float32x2_t __ret_119; \
-  __ret_119 = __noswap_vfma_laneq_f32(__rev0_119, -__rev1_119, __rev2_119, __p3_119); \
-  __ret_119 = __builtin_shufflevector(__ret_119, __ret_119, 1, 0); \
-  __ret_119; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __ret;
-  __ret = vfmaq_f64(__p0, -__p1, (float64x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __noswap_vfmaq_f64(__rev0, -__rev1, (float64x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __ret;
-  __ret = vfmaq_f32(__p0, -__p1, (float32x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vfmaq_f32(__rev0, -__rev1, (float32x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vfms_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) {
-  float64x1_t __ret;
-  __ret = vfma_f64(__p0, -__p1, (float64x1_t) {__p2});
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __ret;
-  __ret = vfma_f32(__p0, -__p1, (float32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __noswap_vfma_f32(__rev0, -__rev1, (float32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x1_t vget_high_p64(poly64x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1);
-  return __ret;
-}
-#else
-__ai poly64x1_t vget_high_p64(poly64x2_t __p0) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1);
-  return __ret;
-}
-__ai poly64x1_t __noswap_vget_high_p64(poly64x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x1_t vget_high_f64(float64x2_t __p0) {
-  float64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1);
-  return __ret;
-}
-#else
-__ai float64x1_t vget_high_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1);
-  return __ret;
-}
-#endif
-
-#define vget_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vget_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vget_lane_f64((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x1_t vget_low_p64(poly64x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0);
-  return __ret;
-}
-#else
-__ai poly64x1_t vget_low_p64(poly64x2_t __p0) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x1_t vget_low_f64(float64x2_t __p0) {
-  float64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0);
-  return __ret;
-}
-#else
-__ai float64x1_t vget_low_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0);
-  return __ret;
-}
-#endif
-
-#define vld1_p64(__p0) __extension__ ({ \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vld1_v(__p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p64(__p0) __extension__ ({ \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \
-  __ret; \
-})
-#else
-#define vld1q_p64(__p0) __extension__ ({ \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f64(__p0) __extension__ ({ \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \
-  __ret; \
-})
-#else
-#define vld1q_f64(__p0) __extension__ ({ \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_f64(__p0) __extension__ ({ \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vld1_v(__p0, 10); \
-  __ret; \
-})
-#define vld1_dup_p64(__p0) __extension__ ({ \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vld1_dup_v(__p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_p64(__p0) __extension__ ({ \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \
-  __ret; \
-})
-#else
-#define vld1q_dup_p64(__p0) __extension__ ({ \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_f64(__p0) __extension__ ({ \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \
-  __ret; \
-})
-#else
-#define vld1q_dup_f64(__p0) __extension__ ({ \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_dup_f64(__p0) __extension__ ({ \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vld1_dup_v(__p0, 10); \
-  __ret; \
-})
-#define vld1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1_t __s1 = __p1; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \
-  __ret; \
-})
-#else
-#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \
-  __ret; \
-})
-#else
-#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s1 = __p1; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \
-  __ret; \
-})
-#define vld1_p64_x2(__p0) __extension__ ({ \
-  poly64x1x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p64_x2(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld1q_p64_x2(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f64_x2(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld1q_f64_x2(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_f64_x2(__p0) __extension__ ({ \
-  float64x1x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld1_p64_x3(__p0) __extension__ ({ \
-  poly64x1x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p64_x3(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld1q_p64_x3(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f64_x3(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld1q_f64_x3(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_f64_x3(__p0) __extension__ ({ \
-  float64x1x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld1_p64_x4(__p0) __extension__ ({ \
-  poly64x1x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p64_x4(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld1q_p64_x4(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f64_x4(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld1q_f64_x4(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_f64_x4(__p0) __extension__ ({ \
-  float64x1x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld2_p64(__p0) __extension__ ({ \
-  poly64x1x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_p64(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld2q_p64(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_u64(__p0) __extension__ ({ \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld2q_u64(__p0) __extension__ ({ \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_f64(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld2q_f64(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_s64(__p0) __extension__ ({ \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld2q_s64(__p0) __extension__ ({ \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_f64(__p0) __extension__ ({ \
-  float64x1x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld2_dup_p64(__p0) __extension__ ({ \
-  poly64x1x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld2q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_f64(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld2q_dup_f64(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_dup_f64(__p0) __extension__ ({ \
-  float64x1x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x2_t __s1 = __p1; \
-  poly64x1x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \
-  __ret; \
-})
-#else
-#define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  poly8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \
-  __ret; \
-})
-#else
-#define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  poly64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \
-  __ret; \
-})
-#else
-#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  uint8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \
-  __ret; \
-})
-#else
-#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  uint64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \
-  __ret; \
-})
-#else
-#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  int8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 42); \
-  __ret; \
-})
-#else
-#define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  float64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 35); \
-  __ret; \
-})
-#else
-#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  int64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x2_t __s1 = __p1; \
-  uint64x1x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \
-  __ret; \
-})
-#define vld2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x2_t __s1 = __p1; \
-  float64x1x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 10); \
-  __ret; \
-})
-#define vld2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x2_t __s1 = __p1; \
-  int64x1x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 3); \
-  __ret; \
-})
-#define vld3_p64(__p0) __extension__ ({ \
-  poly64x1x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_p64(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld3q_p64(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_u64(__p0) __extension__ ({ \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld3q_u64(__p0) __extension__ ({ \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_f64(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld3q_f64(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_s64(__p0) __extension__ ({ \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld3q_s64(__p0) __extension__ ({ \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_f64(__p0) __extension__ ({ \
-  float64x1x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld3_dup_p64(__p0) __extension__ ({ \
-  poly64x1x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld3q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_f64(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld3q_dup_f64(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_dup_f64(__p0) __extension__ ({ \
-  float64x1x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x3_t __s1 = __p1; \
-  poly64x1x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \
-  __ret; \
-})
-#else
-#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  poly8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \
-  __ret; \
-})
-#else
-#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  poly64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \
-  __ret; \
-})
-#else
-#define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  uint8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \
-  __ret; \
-})
-#else
-#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  uint64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \
-  __ret; \
-})
-#else
-#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  int8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 42); \
-  __ret; \
-})
-#else
-#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  float64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 35); \
-  __ret; \
-})
-#else
-#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  int64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x3_t __s1 = __p1; \
-  uint64x1x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \
-  __ret; \
-})
-#define vld3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x3_t __s1 = __p1; \
-  float64x1x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 10); \
-  __ret; \
-})
-#define vld3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x3_t __s1 = __p1; \
-  int64x1x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 3); \
-  __ret; \
-})
-#define vld4_p64(__p0) __extension__ ({ \
-  poly64x1x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_p64(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld4q_p64(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_u64(__p0) __extension__ ({ \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld4q_u64(__p0) __extension__ ({ \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_f64(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld4q_f64(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_s64(__p0) __extension__ ({ \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld4q_s64(__p0) __extension__ ({ \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_f64(__p0) __extension__ ({ \
-  float64x1x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld4_dup_p64(__p0) __extension__ ({ \
-  poly64x1x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld4q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_f64(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld4q_dup_f64(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_dup_f64(__p0) __extension__ ({ \
-  float64x1x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x4_t __s1 = __p1; \
-  poly64x1x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \
-  __ret; \
-})
-#else
-#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  poly8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \
-  __ret; \
-})
-#else
-#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  poly64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \
-  __ret; \
-})
-#else
-#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  uint8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \
-  __ret; \
-})
-#else
-#define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  uint64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \
-  __ret; \
-})
-#else
-#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  int8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 42); \
-  __ret; \
-})
-#else
-#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  float64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 35); \
-  __ret; \
-})
-#else
-#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  int64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x4_t __s1 = __p1; \
-  uint64x1x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \
-  __ret; \
-})
-#define vld4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x4_t __s1 = __p1; \
-  float64x1x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 10); \
-  __ret; \
-})
-#define vld4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x4_t __s1 = __p1; \
-  int64x1x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 3); \
-  __ret; \
-})
-#define vldrq_p128(__p0) __extension__ ({ \
-  poly128_t __ret; \
-  __ret = (poly128_t) __builtin_neon_vldrq_p128(__p0); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmax_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vmaxnmvq_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxnmvq_f64((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai float64_t vmaxnmvq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxnmvq_f64((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vmaxnmvq_f32(float32x4_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmvq_f32((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai float32_t vmaxnmvq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmvq_f32((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vmaxnmv_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmv_f32((int8x8_t)__p0);
-  return __ret;
-}
-#else
-__ai float32_t vmaxnmv_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmv_f32((int8x8_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vmaxvq_u8(uint8x16_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxvq_u8((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vmaxvq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxvq_u8((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vmaxvq_u32(uint32x4_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxvq_u32((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vmaxvq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxvq_u32((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vmaxvq_u16(uint16x8_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxvq_u16((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vmaxvq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxvq_u16((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vmaxvq_s8(int8x16_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxvq_s8((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai int8_t vmaxvq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxvq_s8((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vmaxvq_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxvq_f64((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai float64_t vmaxvq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxvq_f64((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vmaxvq_f32(float32x4_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxvq_f32((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai float32_t vmaxvq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxvq_f32((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vmaxvq_s32(int32x4_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxvq_s32((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai int32_t vmaxvq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxvq_s32((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vmaxvq_s16(int16x8_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxvq_s16((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai int16_t vmaxvq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxvq_s16((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vmaxv_u8(uint8x8_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxv_u8((int8x8_t)__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vmaxv_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxv_u8((int8x8_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vmaxv_u32(uint32x2_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxv_u32((int8x8_t)__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vmaxv_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxv_u32((int8x8_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vmaxv_u16(uint16x4_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxv_u16((int8x8_t)__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vmaxv_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxv_u16((int8x8_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vmaxv_s8(int8x8_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxv_s8((int8x8_t)__p0);
-  return __ret;
-}
-#else
-__ai int8_t vmaxv_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxv_s8((int8x8_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vmaxv_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxv_f32((int8x8_t)__p0);
-  return __ret;
-}
-#else
-__ai float32_t vmaxv_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxv_f32((int8x8_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vmaxv_s32(int32x2_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxv_s32((int8x8_t)__p0);
-  return __ret;
-}
-#else
-__ai int32_t vmaxv_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxv_s32((int8x8_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vmaxv_s16(int16x4_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxv_s16((int8x8_t)__p0);
-  return __ret;
-}
-#else
-__ai int16_t vmaxv_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxv_s16((int8x8_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmin_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vminnmvq_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminnmvq_f64((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai float64_t vminnmvq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminnmvq_f64((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vminnmvq_f32(float32x4_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmvq_f32((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai float32_t vminnmvq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmvq_f32((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vminnmv_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmv_f32((int8x8_t)__p0);
-  return __ret;
-}
-#else
-__ai float32_t vminnmv_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmv_f32((int8x8_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vminvq_u8(uint8x16_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminvq_u8((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vminvq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminvq_u8((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vminvq_u32(uint32x4_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminvq_u32((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vminvq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminvq_u32((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vminvq_u16(uint16x8_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminvq_u16((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vminvq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminvq_u16((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vminvq_s8(int8x16_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminvq_s8((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai int8_t vminvq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminvq_s8((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vminvq_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminvq_f64((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai float64_t vminvq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminvq_f64((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vminvq_f32(float32x4_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminvq_f32((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai float32_t vminvq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminvq_f32((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vminvq_s32(int32x4_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminvq_s32((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai int32_t vminvq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminvq_s32((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vminvq_s16(int16x8_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminvq_s16((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai int16_t vminvq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminvq_s16((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vminv_u8(uint8x8_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminv_u8((int8x8_t)__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vminv_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminv_u8((int8x8_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vminv_u32(uint32x2_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminv_u32((int8x8_t)__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vminv_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminv_u32((int8x8_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vminv_u16(uint16x4_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminv_u16((int8x8_t)__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vminv_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminv_u16((int8x8_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vminv_s8(int8x8_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminv_s8((int8x8_t)__p0);
-  return __ret;
-}
-#else
-__ai int8_t vminv_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminv_s8((int8x8_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vminv_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminv_f32((int8x8_t)__p0);
-  return __ret;
-}
-#else
-__ai float32_t vminv_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminv_f32((int8x8_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vminv_s32(int32x2_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminv_s32((int8x8_t)__p0);
-  return __ret;
-}
-#else
-__ai int32_t vminv_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminv_s32((int8x8_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vminv_s16(int16x4_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminv_s16((int8x8_t)__p0);
-  return __ret;
-}
-#else
-__ai int16_t vminv_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminv_s16((int8x8_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlaq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint16x8_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlaq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlaq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlaq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x2_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmla_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint16x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmla_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmla_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x2_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmla_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x4_t __ret; \
-  __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmla_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmlaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __ret;
-  __ret = __p0 + __p1 * (float64x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai float64x2_t vmlaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 + __rev1 * (float64x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint64x2_t __ret; \
-  __ret = __s0 + vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlal_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __rev0 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 + vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlal_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = __s0 + vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __rev0 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 + vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint64x2_t __ret; \
-  __ret = __s0 + vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlal_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __rev0 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 + vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlal_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = __s0 + vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __rev0 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 + vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint64x2_t __ret; \
-  __ret = __s0 + vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlal_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __rev0 + __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 + vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlal_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 + __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = __s0 + vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __rev0 + __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 + vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 + __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmls_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlsq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint16x8_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlsq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlsq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlsq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmlsq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x2_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmls_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint16x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmls_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmls_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x2_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x4_t __ret; \
-  __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
-  __ret; \
-})
-#else
-#define vmls_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmlsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __ret;
-  __ret = __p0 - __p1 * (float64x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai float64x2_t vmlsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 - __rev1 * (float64x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint64x2_t __ret; \
-  __ret = __s0 - vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlsl_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __rev0 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 - vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlsl_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = __s0 - vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __rev0 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 - vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint64x2_t __ret; \
-  __ret = __s0 - vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlsl_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __rev0 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 - vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlsl_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = __s0 - vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __rev0 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 - vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint64x2_t __ret; \
-  __ret = __s0 - vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlsl_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __s2 = __p2; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __rev0 - __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 - vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlsl_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 - __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = __s0 - vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __rev0 - __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 - vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 - __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-__ai poly64x1_t vmov_n_p64(poly64_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vmovq_n_p64(poly64_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai poly64x2_t vmovq_n_p64(poly64_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmovq_n_f64(float64_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai float64x2_t vmovq_n_f64(float64_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmov_n_f64(float64_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_120) {
-  uint16x8_t __ret_120;
-  uint8x8_t __a1_120 = vget_high_u8(__p0_120);
-  __ret_120 = (uint16x8_t)(vshll_n_u8(__a1_120, 0));
-  return __ret_120;
-}
-#else
-__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_121) {
-  uint8x16_t __rev0_121;  __rev0_121 = __builtin_shufflevector(__p0_121, __p0_121, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret_121;
-  uint8x8_t __a1_121 = __noswap_vget_high_u8(__rev0_121);
-  __ret_121 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_121, 0));
-  __ret_121 = __builtin_shufflevector(__ret_121, __ret_121, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret_121;
-}
-__ai uint16x8_t __noswap_vmovl_high_u8(uint8x16_t __p0_122) {
-  uint16x8_t __ret_122;
-  uint8x8_t __a1_122 = __noswap_vget_high_u8(__p0_122);
-  __ret_122 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_122, 0));
-  return __ret_122;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_123) {
-  uint64x2_t __ret_123;
-  uint32x2_t __a1_123 = vget_high_u32(__p0_123);
-  __ret_123 = (uint64x2_t)(vshll_n_u32(__a1_123, 0));
-  return __ret_123;
-}
-#else
-__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_124) {
-  uint32x4_t __rev0_124;  __rev0_124 = __builtin_shufflevector(__p0_124, __p0_124, 3, 2, 1, 0);
-  uint64x2_t __ret_124;
-  uint32x2_t __a1_124 = __noswap_vget_high_u32(__rev0_124);
-  __ret_124 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_124, 0));
-  __ret_124 = __builtin_shufflevector(__ret_124, __ret_124, 1, 0);
-  return __ret_124;
-}
-__ai uint64x2_t __noswap_vmovl_high_u32(uint32x4_t __p0_125) {
-  uint64x2_t __ret_125;
-  uint32x2_t __a1_125 = __noswap_vget_high_u32(__p0_125);
-  __ret_125 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_125, 0));
-  return __ret_125;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_126) {
-  uint32x4_t __ret_126;
-  uint16x4_t __a1_126 = vget_high_u16(__p0_126);
-  __ret_126 = (uint32x4_t)(vshll_n_u16(__a1_126, 0));
-  return __ret_126;
-}
-#else
-__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_127) {
-  uint16x8_t __rev0_127;  __rev0_127 = __builtin_shufflevector(__p0_127, __p0_127, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret_127;
-  uint16x4_t __a1_127 = __noswap_vget_high_u16(__rev0_127);
-  __ret_127 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_127, 0));
-  __ret_127 = __builtin_shufflevector(__ret_127, __ret_127, 3, 2, 1, 0);
-  return __ret_127;
-}
-__ai uint32x4_t __noswap_vmovl_high_u16(uint16x8_t __p0_128) {
-  uint32x4_t __ret_128;
-  uint16x4_t __a1_128 = __noswap_vget_high_u16(__p0_128);
-  __ret_128 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_128, 0));
-  return __ret_128;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmovl_high_s8(int8x16_t __p0_129) {
-  int16x8_t __ret_129;
-  int8x8_t __a1_129 = vget_high_s8(__p0_129);
-  __ret_129 = (int16x8_t)(vshll_n_s8(__a1_129, 0));
-  return __ret_129;
-}
-#else
-__ai int16x8_t vmovl_high_s8(int8x16_t __p0_130) {
-  int8x16_t __rev0_130;  __rev0_130 = __builtin_shufflevector(__p0_130, __p0_130, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret_130;
-  int8x8_t __a1_130 = __noswap_vget_high_s8(__rev0_130);
-  __ret_130 = (int16x8_t)(__noswap_vshll_n_s8(__a1_130, 0));
-  __ret_130 = __builtin_shufflevector(__ret_130, __ret_130, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret_130;
-}
-__ai int16x8_t __noswap_vmovl_high_s8(int8x16_t __p0_131) {
-  int16x8_t __ret_131;
-  int8x8_t __a1_131 = __noswap_vget_high_s8(__p0_131);
-  __ret_131 = (int16x8_t)(__noswap_vshll_n_s8(__a1_131, 0));
-  return __ret_131;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmovl_high_s32(int32x4_t __p0_132) {
-  int64x2_t __ret_132;
-  int32x2_t __a1_132 = vget_high_s32(__p0_132);
-  __ret_132 = (int64x2_t)(vshll_n_s32(__a1_132, 0));
-  return __ret_132;
-}
-#else
-__ai int64x2_t vmovl_high_s32(int32x4_t __p0_133) {
-  int32x4_t __rev0_133;  __rev0_133 = __builtin_shufflevector(__p0_133, __p0_133, 3, 2, 1, 0);
-  int64x2_t __ret_133;
-  int32x2_t __a1_133 = __noswap_vget_high_s32(__rev0_133);
-  __ret_133 = (int64x2_t)(__noswap_vshll_n_s32(__a1_133, 0));
-  __ret_133 = __builtin_shufflevector(__ret_133, __ret_133, 1, 0);
-  return __ret_133;
-}
-__ai int64x2_t __noswap_vmovl_high_s32(int32x4_t __p0_134) {
-  int64x2_t __ret_134;
-  int32x2_t __a1_134 = __noswap_vget_high_s32(__p0_134);
-  __ret_134 = (int64x2_t)(__noswap_vshll_n_s32(__a1_134, 0));
-  return __ret_134;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmovl_high_s16(int16x8_t __p0_135) {
-  int32x4_t __ret_135;
-  int16x4_t __a1_135 = vget_high_s16(__p0_135);
-  __ret_135 = (int32x4_t)(vshll_n_s16(__a1_135, 0));
-  return __ret_135;
-}
-#else
-__ai int32x4_t vmovl_high_s16(int16x8_t __p0_136) {
-  int16x8_t __rev0_136;  __rev0_136 = __builtin_shufflevector(__p0_136, __p0_136, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret_136;
-  int16x4_t __a1_136 = __noswap_vget_high_s16(__rev0_136);
-  __ret_136 = (int32x4_t)(__noswap_vshll_n_s16(__a1_136, 0));
-  __ret_136 = __builtin_shufflevector(__ret_136, __ret_136, 3, 2, 1, 0);
-  return __ret_136;
-}
-__ai int32x4_t __noswap_vmovl_high_s16(int16x8_t __p0_137) {
-  int32x4_t __ret_137;
-  int16x4_t __a1_137 = __noswap_vget_high_s16(__p0_137);
-  __ret_137 = (int32x4_t)(__noswap_vshll_n_s16(__a1_137, 0));
-  return __ret_137;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vmovn_u32(__p1));
-  return __ret;
-}
-#else
-__ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vmovn_u32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vmovn_u64(__p1));
-  return __ret;
-}
-#else
-__ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vmovn_u64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vmovn_u16(__p1));
-  return __ret;
-}
-#else
-__ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vmovn_u16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vmovn_s32(__p1));
-  return __ret;
-}
-#else
-__ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vmovn_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vmovn_s64(__p1));
-  return __ret;
-}
-#else
-__ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vmovn_s64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vmovn_s16(__p1));
-  return __ret;
-}
-#else
-__ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vmovn_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#define vmuld_lane_f64(__p0_138, __p1_138, __p2_138) __extension__ ({ \
-  float64_t __s0_138 = __p0_138; \
-  float64x1_t __s1_138 = __p1_138; \
-  float64_t __ret_138; \
-  __ret_138 = __s0_138 * vget_lane_f64(__s1_138, __p2_138); \
-  __ret_138; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vmuls_lane_f32(__p0_139, __p1_139, __p2_139) __extension__ ({ \
-  float32_t __s0_139 = __p0_139; \
-  float32x2_t __s1_139 = __p1_139; \
-  float32_t __ret_139; \
-  __ret_139 = __s0_139 * vget_lane_f32(__s1_139, __p2_139); \
-  __ret_139; \
-})
-#else
-#define vmuls_lane_f32(__p0_140, __p1_140, __p2_140) __extension__ ({ \
-  float32_t __s0_140 = __p0_140; \
-  float32x2_t __s1_140 = __p1_140; \
-  float32x2_t __rev1_140;  __rev1_140 = __builtin_shufflevector(__s1_140, __s1_140, 1, 0); \
-  float32_t __ret_140; \
-  __ret_140 = __s0_140 * __noswap_vget_lane_f32(__rev1_140, __p2_140); \
-  __ret_140; \
-})
-#endif
-
-#define vmul_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vmul_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmulq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmuld_laneq_f64(__p0_141, __p1_141, __p2_141) __extension__ ({ \
-  float64_t __s0_141 = __p0_141; \
-  float64x2_t __s1_141 = __p1_141; \
-  float64_t __ret_141; \
-  __ret_141 = __s0_141 * vgetq_lane_f64(__s1_141, __p2_141); \
-  __ret_141; \
-})
-#else
-#define vmuld_laneq_f64(__p0_142, __p1_142, __p2_142) __extension__ ({ \
-  float64_t __s0_142 = __p0_142; \
-  float64x2_t __s1_142 = __p1_142; \
-  float64x2_t __rev1_142;  __rev1_142 = __builtin_shufflevector(__s1_142, __s1_142, 1, 0); \
-  float64_t __ret_142; \
-  __ret_142 = __s0_142 * __noswap_vgetq_lane_f64(__rev1_142, __p2_142); \
-  __ret_142; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmuls_laneq_f32(__p0_143, __p1_143, __p2_143) __extension__ ({ \
-  float32_t __s0_143 = __p0_143; \
-  float32x4_t __s1_143 = __p1_143; \
-  float32_t __ret_143; \
-  __ret_143 = __s0_143 * vgetq_lane_f32(__s1_143, __p2_143); \
-  __ret_143; \
-})
-#else
-#define vmuls_laneq_f32(__p0_144, __p1_144, __p2_144) __extension__ ({ \
-  float32_t __s0_144 = __p0_144; \
-  float32x4_t __s1_144 = __p1_144; \
-  float32x4_t __rev1_144;  __rev1_144 = __builtin_shufflevector(__s1_144, __s1_144, 3, 2, 1, 0); \
-  float32_t __ret_144; \
-  __ret_144 = __s0_144 * __noswap_vgetq_lane_f32(__rev1_144, __p2_144); \
-  __ret_144; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 10); \
-  __ret; \
-})
-#else
-#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__rev1, __p2, 10); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmulq_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmulq_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmulq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmulq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmulq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmulq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmul_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmul_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmul_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmul_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
-  __ret; \
-})
-#else
-#define vmul_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-__ai float64x1_t vmul_n_f64(float64x1_t __p0, float64_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmul_n_f64((int8x8_t)__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) {
-  float64x2_t __ret;
-  __ret = __p0 * (float64x2_t) {__p1, __p1};
-  return __ret;
-}
-#else
-__ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 * (float64x2_t) {__p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai poly128_t vmull_p64(poly64_t __p0, poly64_t __p1) {
-  poly128_t __ret;
-  __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly16x8_t __ret;
-  __ret = vmull_p8(vget_high_p8(__p0), vget_high_p8(__p1));
-  return __ret;
-}
-#else
-__ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __noswap_vmull_p8(__noswap_vget_high_p8(__rev0), __noswap_vget_high_p8(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = vmull_u8(vget_high_u8(__p0), vget_high_u8(__p1));
-  return __ret;
-}
-#else
-__ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmull_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmull_u32(vget_high_u32(__p0), vget_high_u32(__p1));
-  return __ret;
-}
-#else
-__ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmull_u16(vget_high_u16(__p0), vget_high_u16(__p1));
-  return __ret;
-}
-#else
-__ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = vmull_s8(vget_high_s8(__p0), vget_high_s8(__p1));
-  return __ret;
-}
-#else
-__ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmull_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = vmull_s32(vget_high_s32(__p0), vget_high_s32(__p1));
-  return __ret;
-}
-#else
-__ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = vmull_s16(vget_high_s16(__p0), vget_high_s16(__p1));
-  return __ret;
-}
-#else
-__ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly128_t __ret;
-  __ret = vmull_p64((poly64_t)(vget_high_p64(__p0)), (poly64_t)(vget_high_p64(__p1)));
-  return __ret;
-}
-#else
-__ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly128_t __ret;
-  __ret = vmull_p64((poly64_t)(__noswap_vget_high_p64(__rev0)), (poly64_t)(__noswap_vget_high_p64(__rev1)));
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = vmull_u32(vget_high_u32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmull_high_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = vmull_u16(vget_high_u16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmull_high_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = vmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = vmull_u32(vget_high_u32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmull_high_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = vmull_u16(vget_high_u16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmull_high_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = vmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmull_n_u32(vget_high_u32(__p0), __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmull_n_u32(__noswap_vget_high_u32(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmull_n_u16(vget_high_u16(__p0), __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmull_n_u16(__noswap_vget_high_u16(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
-  int64x2_t __ret;
-  __ret = vmull_n_s32(vget_high_s32(__p0), __p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmull_n_s32(__noswap_vget_high_s32(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
-  int32x4_t __ret;
-  __ret = vmull_n_s16(vget_high_s16(__p0), __p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmull_n_s16(__noswap_vget_high_s16(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = vmull_u32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmull_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __noswap_vmull_u32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = vmull_u16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmull_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __noswap_vmull_u16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = vmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmulx_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#endif
-
-__ai float64_t vmulxd_f64(float64_t __p0, float64_t __p1) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1);
-  return __ret;
-}
-__ai float32_t vmulxs_f32(float32_t __p0, float32_t __p1) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1);
-  return __ret;
-}
-#define vmulxd_lane_f64(__p0_145, __p1_145, __p2_145) __extension__ ({ \
-  float64_t __s0_145 = __p0_145; \
-  float64x1_t __s1_145 = __p1_145; \
-  float64_t __ret_145; \
-  __ret_145 = vmulxd_f64(__s0_145, vget_lane_f64(__s1_145, __p2_145)); \
-  __ret_145; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vmulxs_lane_f32(__p0_146, __p1_146, __p2_146) __extension__ ({ \
-  float32_t __s0_146 = __p0_146; \
-  float32x2_t __s1_146 = __p1_146; \
-  float32_t __ret_146; \
-  __ret_146 = vmulxs_f32(__s0_146, vget_lane_f32(__s1_146, __p2_146)); \
-  __ret_146; \
-})
-#else
-#define vmulxs_lane_f32(__p0_147, __p1_147, __p2_147) __extension__ ({ \
-  float32_t __s0_147 = __p0_147; \
-  float32x2_t __s1_147 = __p1_147; \
-  float32x2_t __rev1_147;  __rev1_147 = __builtin_shufflevector(__s1_147, __s1_147, 1, 0); \
-  float32_t __ret_147; \
-  __ret_147 = vmulxs_f32(__s0_147, __noswap_vget_lane_f32(__rev1_147, __p2_147)); \
-  __ret_147; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = vmulxq_f64(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmulxq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __ret; \
-  __ret = __noswap_vmulxq_f64(__rev0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = vmulxq_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmulxq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __noswap_vmulxq_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = vmulx_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmulx_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __noswap_vmulx_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxd_laneq_f64(__p0_148, __p1_148, __p2_148) __extension__ ({ \
-  float64_t __s0_148 = __p0_148; \
-  float64x2_t __s1_148 = __p1_148; \
-  float64_t __ret_148; \
-  __ret_148 = vmulxd_f64(__s0_148, vgetq_lane_f64(__s1_148, __p2_148)); \
-  __ret_148; \
-})
-#else
-#define vmulxd_laneq_f64(__p0_149, __p1_149, __p2_149) __extension__ ({ \
-  float64_t __s0_149 = __p0_149; \
-  float64x2_t __s1_149 = __p1_149; \
-  float64x2_t __rev1_149;  __rev1_149 = __builtin_shufflevector(__s1_149, __s1_149, 1, 0); \
-  float64_t __ret_149; \
-  __ret_149 = vmulxd_f64(__s0_149, __noswap_vgetq_lane_f64(__rev1_149, __p2_149)); \
-  __ret_149; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxs_laneq_f32(__p0_150, __p1_150, __p2_150) __extension__ ({ \
-  float32_t __s0_150 = __p0_150; \
-  float32x4_t __s1_150 = __p1_150; \
-  float32_t __ret_150; \
-  __ret_150 = vmulxs_f32(__s0_150, vgetq_lane_f32(__s1_150, __p2_150)); \
-  __ret_150; \
-})
-#else
-#define vmulxs_laneq_f32(__p0_151, __p1_151, __p2_151) __extension__ ({ \
-  float32_t __s0_151 = __p0_151; \
-  float32x4_t __s1_151 = __p1_151; \
-  float32x4_t __rev1_151;  __rev1_151 = __builtin_shufflevector(__s1_151, __s1_151, 3, 2, 1, 0); \
-  float32_t __ret_151; \
-  __ret_151 = vmulxs_f32(__s0_151, __noswap_vgetq_lane_f32(__rev1_151, __p2_151)); \
-  __ret_151; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = vmulxq_f64(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmulxq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
-  __ret = __noswap_vmulxq_f64(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = vmulxq_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmulxq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = __noswap_vmulxq_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = vmulx_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vmulx_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = __noswap_vmulx_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vnegq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai float64x2_t vnegq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vnegq_s64(int64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int64x2_t vnegq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vneg_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-__ai int64x1_t vneg_s64(int64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-__ai int64_t vnegd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vnegd_s64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64_t vpaddd_u64(uint64x2_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vpaddd_u64((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai uint64_t vpaddd_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vpaddd_u64((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vpaddd_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpaddd_f64((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai float64_t vpaddd_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpaddd_f64((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64_t vpaddd_s64(int64x2_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vpaddd_s64((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai int64_t vpaddd_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vpaddd_s64((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vpadds_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpadds_f32((int8x8_t)__p0);
-  return __ret;
-}
-#else
-__ai float32_t vpadds_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpadds_f32((int8x8_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vpmaxqd_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxqd_f64((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai float64_t vpmaxqd_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxqd_f64((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vpmaxs_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxs_f32((int8x8_t)__p0);
-  return __ret;
-}
-#else
-__ai float32_t vpmaxs_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxs_f32((int8x8_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vpmaxnmqd_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai float64_t vpmaxnmqd_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vpmaxnms_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxnms_f32((int8x8_t)__p0);
-  return __ret;
-}
-#else
-__ai float32_t vpmaxnms_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxnms_f32((int8x8_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vpminqd_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminqd_f64((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai float64_t vpminqd_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminqd_f64((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vpmins_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmins_f32((int8x8_t)__p0);
-  return __ret;
-}
-#else
-__ai float32_t vpmins_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmins_f32((int8x8_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vpminnmqd_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminnmqd_f64((int8x16_t)__p0);
-  return __ret;
-}
-#else
-__ai float64_t vpminnmqd_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminnmqd_f64((int8x16_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vpminnms_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpminnms_f32((int8x8_t)__p0);
-  return __ret;
-}
-#else
-__ai float32_t vpminnms_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpminnms_f32((int8x8_t)__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqabsq_s64(int64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqabsq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vqabs_s64(int64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-__ai int8_t vqabsb_s8(int8_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqabsb_s8(__p0);
-  return __ret;
-}
-__ai int32_t vqabss_s32(int32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqabss_s32(__p0);
-  return __ret;
-}
-__ai int64_t vqabsd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqabsd_s64(__p0);
-  return __ret;
-}
-__ai int16_t vqabsh_s16(int16_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqabsh_s16(__p0);
-  return __ret;
-}
-__ai uint8_t vqaddb_u8(uint8_t __p0, uint8_t __p1) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqaddb_u8(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vqadds_u32(uint32_t __p0, uint32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqadds_u32(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vqaddd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vqaddd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint16_t vqaddh_u16(uint16_t __p0, uint16_t __p1) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqaddh_u16(__p0, __p1);
-  return __ret;
-}
-__ai int8_t vqaddb_s8(int8_t __p0, int8_t __p1) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqaddb_s8(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vqadds_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vqaddd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqaddd_s64(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqaddh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vqdmlals_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqdmlals_s32(__p0, __p1, __p2);
-  return __ret;
-}
-__ai int32_t vqdmlalh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqdmlalh_s16(__p0, __p1, __p2);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = vqdmlal_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vqdmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqdmlal_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vqdmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = vqdmlal_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vqdmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqdmlal_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vqdmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlal_n_s32(__p0, vget_high_s32(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlal_n_s16(__p0, vget_high_s16(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, (int8x8_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, (int8x8_t)__rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, (int8x8_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, (int8x8_t)__rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, (int8x16_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, (int8x16_t)__rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, (int8x16_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, (int8x16_t)__rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = vqdmlal_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vqdmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmlal_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqdmlal_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vqdmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmlal_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-__ai int64_t vqdmlsls_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqdmlsls_s32(__p0, __p1, __p2);
-  return __ret;
-}
-__ai int32_t vqdmlslh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqdmlslh_s16(__p0, __p1, __p2);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = vqdmlsl_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vqdmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqdmlsl_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vqdmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = vqdmlsl_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vqdmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqdmlsl_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vqdmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlsl_n_s32(__p0, vget_high_s32(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlsl_n_s16(__p0, vget_high_s16(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, (int8x8_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, (int8x8_t)__rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, (int8x8_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, (int8x8_t)__rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, (int8x16_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, (int8x16_t)__rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, (int8x16_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, (int8x16_t)__rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = vqdmlsl_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vqdmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = vqdmlsl_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vqdmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-__ai int32_t vqdmulhs_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqdmulhh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhs_lane_s32(__p0_152, __p1_152, __p2_152) __extension__ ({ \
-  int32_t __s0_152 = __p0_152; \
-  int32x2_t __s1_152 = __p1_152; \
-  int32_t __ret_152; \
-  __ret_152 = vqdmulhs_s32(__s0_152, vget_lane_s32(__s1_152, __p2_152)); \
-  __ret_152; \
-})
-#else
-#define vqdmulhs_lane_s32(__p0_153, __p1_153, __p2_153) __extension__ ({ \
-  int32_t __s0_153 = __p0_153; \
-  int32x2_t __s1_153 = __p1_153; \
-  int32x2_t __rev1_153;  __rev1_153 = __builtin_shufflevector(__s1_153, __s1_153, 1, 0); \
-  int32_t __ret_153; \
-  __ret_153 = vqdmulhs_s32(__s0_153, __noswap_vget_lane_s32(__rev1_153, __p2_153)); \
-  __ret_153; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhh_lane_s16(__p0_154, __p1_154, __p2_154) __extension__ ({ \
-  int16_t __s0_154 = __p0_154; \
-  int16x4_t __s1_154 = __p1_154; \
-  int16_t __ret_154; \
-  __ret_154 = vqdmulhh_s16(__s0_154, vget_lane_s16(__s1_154, __p2_154)); \
-  __ret_154; \
-})
-#else
-#define vqdmulhh_lane_s16(__p0_155, __p1_155, __p2_155) __extension__ ({ \
-  int16_t __s0_155 = __p0_155; \
-  int16x4_t __s1_155 = __p1_155; \
-  int16x4_t __rev1_155;  __rev1_155 = __builtin_shufflevector(__s1_155, __s1_155, 3, 2, 1, 0); \
-  int16_t __ret_155; \
-  __ret_155 = vqdmulhh_s16(__s0_155, __noswap_vget_lane_s16(__rev1_155, __p2_155)); \
-  __ret_155; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhs_laneq_s32(__p0_156, __p1_156, __p2_156) __extension__ ({ \
-  int32_t __s0_156 = __p0_156; \
-  int32x4_t __s1_156 = __p1_156; \
-  int32_t __ret_156; \
-  __ret_156 = vqdmulhs_s32(__s0_156, vgetq_lane_s32(__s1_156, __p2_156)); \
-  __ret_156; \
-})
-#else
-#define vqdmulhs_laneq_s32(__p0_157, __p1_157, __p2_157) __extension__ ({ \
-  int32_t __s0_157 = __p0_157; \
-  int32x4_t __s1_157 = __p1_157; \
-  int32x4_t __rev1_157;  __rev1_157 = __builtin_shufflevector(__s1_157, __s1_157, 3, 2, 1, 0); \
-  int32_t __ret_157; \
-  __ret_157 = vqdmulhs_s32(__s0_157, __noswap_vgetq_lane_s32(__rev1_157, __p2_157)); \
-  __ret_157; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhh_laneq_s16(__p0_158, __p1_158, __p2_158) __extension__ ({ \
-  int16_t __s0_158 = __p0_158; \
-  int16x8_t __s1_158 = __p1_158; \
-  int16_t __ret_158; \
-  __ret_158 = vqdmulhh_s16(__s0_158, vgetq_lane_s16(__s1_158, __p2_158)); \
-  __ret_158; \
-})
-#else
-#define vqdmulhh_laneq_s16(__p0_159, __p1_159, __p2_159) __extension__ ({ \
-  int16_t __s0_159 = __p0_159; \
-  int16x8_t __s1_159 = __p1_159; \
-  int16x8_t __rev1_159;  __rev1_159 = __builtin_shufflevector(__s1_159, __s1_159, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_159; \
-  __ret_159 = vqdmulhh_s16(__s0_159, __noswap_vgetq_lane_s16(__rev1_159, __p2_159)); \
-  __ret_159; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vqdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = vqdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __noswap_vqdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = vqdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __noswap_vqdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = vqdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __noswap_vqdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-__ai int64_t vqdmulls_s32(int32_t __p0, int32_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vqdmullh_s16(int16_t __p0, int16_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = vqdmull_s32(vget_high_s32(__p0), vget_high_s32(__p1));
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = vqdmull_s16(vget_high_s16(__p0), vget_high_s16(__p1));
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = vqdmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqdmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vqdmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqdmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = vqdmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqdmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vqdmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqdmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
-  int64x2_t __ret;
-  __ret = vqdmull_n_s32(vget_high_s32(__p0), __p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmull_n_s32(__noswap_vget_high_s32(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
-  int32x4_t __ret;
-  __ret = vqdmull_n_s16(vget_high_s16(__p0), __p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmull_n_s16(__noswap_vget_high_s16(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulls_lane_s32(__p0_160, __p1_160, __p2_160) __extension__ ({ \
-  int32_t __s0_160 = __p0_160; \
-  int32x2_t __s1_160 = __p1_160; \
-  int64_t __ret_160; \
-  __ret_160 = vqdmulls_s32(__s0_160, vget_lane_s32(__s1_160, __p2_160)); \
-  __ret_160; \
-})
-#else
-#define vqdmulls_lane_s32(__p0_161, __p1_161, __p2_161) __extension__ ({ \
-  int32_t __s0_161 = __p0_161; \
-  int32x2_t __s1_161 = __p1_161; \
-  int32x2_t __rev1_161;  __rev1_161 = __builtin_shufflevector(__s1_161, __s1_161, 1, 0); \
-  int64_t __ret_161; \
-  __ret_161 = vqdmulls_s32(__s0_161, __noswap_vget_lane_s32(__rev1_161, __p2_161)); \
-  __ret_161; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmullh_lane_s16(__p0_162, __p1_162, __p2_162) __extension__ ({ \
-  int16_t __s0_162 = __p0_162; \
-  int16x4_t __s1_162 = __p1_162; \
-  int32_t __ret_162; \
-  __ret_162 = vqdmullh_s16(__s0_162, vget_lane_s16(__s1_162, __p2_162)); \
-  __ret_162; \
-})
-#else
-#define vqdmullh_lane_s16(__p0_163, __p1_163, __p2_163) __extension__ ({ \
-  int16_t __s0_163 = __p0_163; \
-  int16x4_t __s1_163 = __p1_163; \
-  int16x4_t __rev1_163;  __rev1_163 = __builtin_shufflevector(__s1_163, __s1_163, 3, 2, 1, 0); \
-  int32_t __ret_163; \
-  __ret_163 = vqdmullh_s16(__s0_163, __noswap_vget_lane_s16(__rev1_163, __p2_163)); \
-  __ret_163; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulls_laneq_s32(__p0_164, __p1_164, __p2_164) __extension__ ({ \
-  int32_t __s0_164 = __p0_164; \
-  int32x4_t __s1_164 = __p1_164; \
-  int64_t __ret_164; \
-  __ret_164 = vqdmulls_s32(__s0_164, vgetq_lane_s32(__s1_164, __p2_164)); \
-  __ret_164; \
-})
-#else
-#define vqdmulls_laneq_s32(__p0_165, __p1_165, __p2_165) __extension__ ({ \
-  int32_t __s0_165 = __p0_165; \
-  int32x4_t __s1_165 = __p1_165; \
-  int32x4_t __rev1_165;  __rev1_165 = __builtin_shufflevector(__s1_165, __s1_165, 3, 2, 1, 0); \
-  int64_t __ret_165; \
-  __ret_165 = vqdmulls_s32(__s0_165, __noswap_vgetq_lane_s32(__rev1_165, __p2_165)); \
-  __ret_165; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmullh_laneq_s16(__p0_166, __p1_166, __p2_166) __extension__ ({ \
-  int16_t __s0_166 = __p0_166; \
-  int16x8_t __s1_166 = __p1_166; \
-  int32_t __ret_166; \
-  __ret_166 = vqdmullh_s16(__s0_166, vgetq_lane_s16(__s1_166, __p2_166)); \
-  __ret_166; \
-})
-#else
-#define vqdmullh_laneq_s16(__p0_167, __p1_167, __p2_167) __extension__ ({ \
-  int16_t __s0_167 = __p0_167; \
-  int16x8_t __s1_167 = __p1_167; \
-  int16x8_t __rev1_167;  __rev1_167 = __builtin_shufflevector(__s1_167, __s1_167, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32_t __ret_167; \
-  __ret_167 = vqdmullh_s16(__s0_167, __noswap_vgetq_lane_s16(__rev1_167, __p2_167)); \
-  __ret_167; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = vqdmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqdmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __noswap_vqdmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vqdmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqdmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqdmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-__ai int16_t vqmovns_s32(int32_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqmovns_s32(__p0);
-  return __ret;
-}
-__ai int32_t vqmovnd_s64(int64_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqmovnd_s64(__p0);
-  return __ret;
-}
-__ai int8_t vqmovnh_s16(int16_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqmovnh_s16(__p0);
-  return __ret;
-}
-__ai uint16_t vqmovns_u32(uint32_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqmovns_u32(__p0);
-  return __ret;
-}
-__ai uint32_t vqmovnd_u64(uint64_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqmovnd_u64(__p0);
-  return __ret;
-}
-__ai uint8_t vqmovnh_u16(uint16_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqmovnh_u16(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vqmovn_u32(__p1));
-  return __ret;
-}
-#else
-__ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vqmovn_u32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vqmovn_u64(__p1));
-  return __ret;
-}
-#else
-__ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vqmovn_u64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vqmovn_u16(__p1));
-  return __ret;
-}
-#else
-__ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vqmovn_u16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vqmovn_s32(__p1));
-  return __ret;
-}
-#else
-__ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vqmovn_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vqmovn_s64(__p1));
-  return __ret;
-}
-#else
-__ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vqmovn_s64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vqmovn_s16(__p1));
-  return __ret;
-}
-#else
-__ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vqmovn_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int16_t vqmovuns_s32(int32_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqmovuns_s32(__p0);
-  return __ret;
-}
-__ai int32_t vqmovund_s64(int64_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqmovund_s64(__p0);
-  return __ret;
-}
-__ai int8_t vqmovunh_s16(int16_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqmovunh_s16(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqmovun_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16((uint16x4_t)(__p0), vqmovun_s32(__p1));
-  return __ret;
-}
-#else
-__ai uint16x8_t vqmovun_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16((uint16x4_t)(__rev0), __noswap_vqmovun_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqmovun_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32((uint32x2_t)(__p0), vqmovun_s64(__p1));
-  return __ret;
-}
-#else
-__ai uint32x4_t vqmovun_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32((uint32x2_t)(__rev0), __noswap_vqmovun_s64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqmovun_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8((uint8x8_t)(__p0), vqmovun_s16(__p1));
-  return __ret;
-}
-#else
-__ai uint8x16_t vqmovun_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8((uint8x8_t)(__rev0), __noswap_vqmovun_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqnegq_s64(int64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqnegq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vqneg_s64(int64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-__ai int8_t vqnegb_s8(int8_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqnegb_s8(__p0);
-  return __ret;
-}
-__ai int32_t vqnegs_s32(int32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqnegs_s32(__p0);
-  return __ret;
-}
-__ai int64_t vqnegd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqnegd_s64(__p0);
-  return __ret;
-}
-__ai int16_t vqnegh_s16(int16_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqnegh_s16(__p0);
-  return __ret;
-}
-__ai int32_t vqrdmulhs_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqrdmulhh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhs_lane_s32(__p0_168, __p1_168, __p2_168) __extension__ ({ \
-  int32_t __s0_168 = __p0_168; \
-  int32x2_t __s1_168 = __p1_168; \
-  int32_t __ret_168; \
-  __ret_168 = vqrdmulhs_s32(__s0_168, vget_lane_s32(__s1_168, __p2_168)); \
-  __ret_168; \
-})
-#else
-#define vqrdmulhs_lane_s32(__p0_169, __p1_169, __p2_169) __extension__ ({ \
-  int32_t __s0_169 = __p0_169; \
-  int32x2_t __s1_169 = __p1_169; \
-  int32x2_t __rev1_169;  __rev1_169 = __builtin_shufflevector(__s1_169, __s1_169, 1, 0); \
-  int32_t __ret_169; \
-  __ret_169 = vqrdmulhs_s32(__s0_169, __noswap_vget_lane_s32(__rev1_169, __p2_169)); \
-  __ret_169; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhh_lane_s16(__p0_170, __p1_170, __p2_170) __extension__ ({ \
-  int16_t __s0_170 = __p0_170; \
-  int16x4_t __s1_170 = __p1_170; \
-  int16_t __ret_170; \
-  __ret_170 = vqrdmulhh_s16(__s0_170, vget_lane_s16(__s1_170, __p2_170)); \
-  __ret_170; \
-})
-#else
-#define vqrdmulhh_lane_s16(__p0_171, __p1_171, __p2_171) __extension__ ({ \
-  int16_t __s0_171 = __p0_171; \
-  int16x4_t __s1_171 = __p1_171; \
-  int16x4_t __rev1_171;  __rev1_171 = __builtin_shufflevector(__s1_171, __s1_171, 3, 2, 1, 0); \
-  int16_t __ret_171; \
-  __ret_171 = vqrdmulhh_s16(__s0_171, __noswap_vget_lane_s16(__rev1_171, __p2_171)); \
-  __ret_171; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhs_laneq_s32(__p0_172, __p1_172, __p2_172) __extension__ ({ \
-  int32_t __s0_172 = __p0_172; \
-  int32x4_t __s1_172 = __p1_172; \
-  int32_t __ret_172; \
-  __ret_172 = vqrdmulhs_s32(__s0_172, vgetq_lane_s32(__s1_172, __p2_172)); \
-  __ret_172; \
-})
-#else
-#define vqrdmulhs_laneq_s32(__p0_173, __p1_173, __p2_173) __extension__ ({ \
-  int32_t __s0_173 = __p0_173; \
-  int32x4_t __s1_173 = __p1_173; \
-  int32x4_t __rev1_173;  __rev1_173 = __builtin_shufflevector(__s1_173, __s1_173, 3, 2, 1, 0); \
-  int32_t __ret_173; \
-  __ret_173 = vqrdmulhs_s32(__s0_173, __noswap_vgetq_lane_s32(__rev1_173, __p2_173)); \
-  __ret_173; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhh_laneq_s16(__p0_174, __p1_174, __p2_174) __extension__ ({ \
-  int16_t __s0_174 = __p0_174; \
-  int16x8_t __s1_174 = __p1_174; \
-  int16_t __ret_174; \
-  __ret_174 = vqrdmulhh_s16(__s0_174, vgetq_lane_s16(__s1_174, __p2_174)); \
-  __ret_174; \
-})
-#else
-#define vqrdmulhh_laneq_s16(__p0_175, __p1_175, __p2_175) __extension__ ({ \
-  int16_t __s0_175 = __p0_175; \
-  int16x8_t __s1_175 = __p1_175; \
-  int16x8_t __rev1_175;  __rev1_175 = __builtin_shufflevector(__s1_175, __s1_175, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_175; \
-  __ret_175 = vqrdmulhh_s16(__s0_175, __noswap_vgetq_lane_s16(__rev1_175, __p2_175)); \
-  __ret_175; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = vqrdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __noswap_vqrdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = vqrdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = __noswap_vqrdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = vqrdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = __noswap_vqrdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = vqrdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
-  __ret; \
-})
-#else
-#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = __noswap_vqrdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-__ai uint8_t vqrshlb_u8(uint8_t __p0, uint8_t __p1) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqrshlb_u8(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vqrshls_u32(uint32_t __p0, uint32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqrshls_u32(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vqrshld_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vqrshld_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint16_t vqrshlh_u16(uint16_t __p0, uint16_t __p1) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqrshlh_u16(__p0, __p1);
-  return __ret;
-}
-__ai int8_t vqrshlb_s8(int8_t __p0, int8_t __p1) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqrshlb_s8(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vqrshls_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqrshls_s32(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vqrshld_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqrshld_s64(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqrshlh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqrshlh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_u32(__p0_176, __p1_176, __p2_176) __extension__ ({ \
-  uint16x4_t __s0_176 = __p0_176; \
-  uint32x4_t __s1_176 = __p1_176; \
-  uint16x8_t __ret_176; \
-  __ret_176 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_176), (uint16x4_t)(vqrshrn_n_u32(__s1_176, __p2_176)))); \
-  __ret_176; \
-})
-#else
-#define vqrshrn_high_n_u32(__p0_177, __p1_177, __p2_177) __extension__ ({ \
-  uint16x4_t __s0_177 = __p0_177; \
-  uint32x4_t __s1_177 = __p1_177; \
-  uint16x4_t __rev0_177;  __rev0_177 = __builtin_shufflevector(__s0_177, __s0_177, 3, 2, 1, 0); \
-  uint32x4_t __rev1_177;  __rev1_177 = __builtin_shufflevector(__s1_177, __s1_177, 3, 2, 1, 0); \
-  uint16x8_t __ret_177; \
-  __ret_177 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_177), (uint16x4_t)(__noswap_vqrshrn_n_u32(__rev1_177, __p2_177)))); \
-  __ret_177 = __builtin_shufflevector(__ret_177, __ret_177, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_177; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_u64(__p0_178, __p1_178, __p2_178) __extension__ ({ \
-  uint32x2_t __s0_178 = __p0_178; \
-  uint64x2_t __s1_178 = __p1_178; \
-  uint32x4_t __ret_178; \
-  __ret_178 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_178), (uint32x2_t)(vqrshrn_n_u64(__s1_178, __p2_178)))); \
-  __ret_178; \
-})
-#else
-#define vqrshrn_high_n_u64(__p0_179, __p1_179, __p2_179) __extension__ ({ \
-  uint32x2_t __s0_179 = __p0_179; \
-  uint64x2_t __s1_179 = __p1_179; \
-  uint32x2_t __rev0_179;  __rev0_179 = __builtin_shufflevector(__s0_179, __s0_179, 1, 0); \
-  uint64x2_t __rev1_179;  __rev1_179 = __builtin_shufflevector(__s1_179, __s1_179, 1, 0); \
-  uint32x4_t __ret_179; \
-  __ret_179 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_179), (uint32x2_t)(__noswap_vqrshrn_n_u64(__rev1_179, __p2_179)))); \
-  __ret_179 = __builtin_shufflevector(__ret_179, __ret_179, 3, 2, 1, 0); \
-  __ret_179; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_u16(__p0_180, __p1_180, __p2_180) __extension__ ({ \
-  uint8x8_t __s0_180 = __p0_180; \
-  uint16x8_t __s1_180 = __p1_180; \
-  uint8x16_t __ret_180; \
-  __ret_180 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_180), (uint8x8_t)(vqrshrn_n_u16(__s1_180, __p2_180)))); \
-  __ret_180; \
-})
-#else
-#define vqrshrn_high_n_u16(__p0_181, __p1_181, __p2_181) __extension__ ({ \
-  uint8x8_t __s0_181 = __p0_181; \
-  uint16x8_t __s1_181 = __p1_181; \
-  uint8x8_t __rev0_181;  __rev0_181 = __builtin_shufflevector(__s0_181, __s0_181, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_181;  __rev1_181 = __builtin_shufflevector(__s1_181, __s1_181, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_181; \
-  __ret_181 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_181), (uint8x8_t)(__noswap_vqrshrn_n_u16(__rev1_181, __p2_181)))); \
-  __ret_181 = __builtin_shufflevector(__ret_181, __ret_181, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_181; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_s32(__p0_182, __p1_182, __p2_182) __extension__ ({ \
-  int16x4_t __s0_182 = __p0_182; \
-  int32x4_t __s1_182 = __p1_182; \
-  int16x8_t __ret_182; \
-  __ret_182 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_182), (int16x4_t)(vqrshrn_n_s32(__s1_182, __p2_182)))); \
-  __ret_182; \
-})
-#else
-#define vqrshrn_high_n_s32(__p0_183, __p1_183, __p2_183) __extension__ ({ \
-  int16x4_t __s0_183 = __p0_183; \
-  int32x4_t __s1_183 = __p1_183; \
-  int16x4_t __rev0_183;  __rev0_183 = __builtin_shufflevector(__s0_183, __s0_183, 3, 2, 1, 0); \
-  int32x4_t __rev1_183;  __rev1_183 = __builtin_shufflevector(__s1_183, __s1_183, 3, 2, 1, 0); \
-  int16x8_t __ret_183; \
-  __ret_183 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_183), (int16x4_t)(__noswap_vqrshrn_n_s32(__rev1_183, __p2_183)))); \
-  __ret_183 = __builtin_shufflevector(__ret_183, __ret_183, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_183; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_s64(__p0_184, __p1_184, __p2_184) __extension__ ({ \
-  int32x2_t __s0_184 = __p0_184; \
-  int64x2_t __s1_184 = __p1_184; \
-  int32x4_t __ret_184; \
-  __ret_184 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_184), (int32x2_t)(vqrshrn_n_s64(__s1_184, __p2_184)))); \
-  __ret_184; \
-})
-#else
-#define vqrshrn_high_n_s64(__p0_185, __p1_185, __p2_185) __extension__ ({ \
-  int32x2_t __s0_185 = __p0_185; \
-  int64x2_t __s1_185 = __p1_185; \
-  int32x2_t __rev0_185;  __rev0_185 = __builtin_shufflevector(__s0_185, __s0_185, 1, 0); \
-  int64x2_t __rev1_185;  __rev1_185 = __builtin_shufflevector(__s1_185, __s1_185, 1, 0); \
-  int32x4_t __ret_185; \
-  __ret_185 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_185), (int32x2_t)(__noswap_vqrshrn_n_s64(__rev1_185, __p2_185)))); \
-  __ret_185 = __builtin_shufflevector(__ret_185, __ret_185, 3, 2, 1, 0); \
-  __ret_185; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_s16(__p0_186, __p1_186, __p2_186) __extension__ ({ \
-  int8x8_t __s0_186 = __p0_186; \
-  int16x8_t __s1_186 = __p1_186; \
-  int8x16_t __ret_186; \
-  __ret_186 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_186), (int8x8_t)(vqrshrn_n_s16(__s1_186, __p2_186)))); \
-  __ret_186; \
-})
-#else
-#define vqrshrn_high_n_s16(__p0_187, __p1_187, __p2_187) __extension__ ({ \
-  int8x8_t __s0_187 = __p0_187; \
-  int16x8_t __s1_187 = __p1_187; \
-  int8x8_t __rev0_187;  __rev0_187 = __builtin_shufflevector(__s0_187, __s0_187, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_187;  __rev1_187 = __builtin_shufflevector(__s1_187, __s1_187, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_187; \
-  __ret_187 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_187), (int8x8_t)(__noswap_vqrshrn_n_s16(__rev1_187, __p2_187)))); \
-  __ret_187 = __builtin_shufflevector(__ret_187, __ret_187, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_187; \
-})
-#endif
-
-#define vqrshrns_n_u32(__p0, __p1) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vqrshrns_n_u32(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrnd_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vqrshrnd_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrnh_n_u16(__p0, __p1) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vqrshrnh_n_u16(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrns_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqrshrns_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrnd_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqrshrnd_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrnh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqrshrnh_n_s16(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_high_n_s32(__p0_188, __p1_188, __p2_188) __extension__ ({ \
-  int16x4_t __s0_188 = __p0_188; \
-  int32x4_t __s1_188 = __p1_188; \
-  int16x8_t __ret_188; \
-  __ret_188 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_188), (int16x4_t)(vqrshrun_n_s32(__s1_188, __p2_188)))); \
-  __ret_188; \
-})
-#else
-#define vqrshrun_high_n_s32(__p0_189, __p1_189, __p2_189) __extension__ ({ \
-  int16x4_t __s0_189 = __p0_189; \
-  int32x4_t __s1_189 = __p1_189; \
-  int16x4_t __rev0_189;  __rev0_189 = __builtin_shufflevector(__s0_189, __s0_189, 3, 2, 1, 0); \
-  int32x4_t __rev1_189;  __rev1_189 = __builtin_shufflevector(__s1_189, __s1_189, 3, 2, 1, 0); \
-  int16x8_t __ret_189; \
-  __ret_189 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_189), (int16x4_t)(__noswap_vqrshrun_n_s32(__rev1_189, __p2_189)))); \
-  __ret_189 = __builtin_shufflevector(__ret_189, __ret_189, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_189; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_high_n_s64(__p0_190, __p1_190, __p2_190) __extension__ ({ \
-  int32x2_t __s0_190 = __p0_190; \
-  int64x2_t __s1_190 = __p1_190; \
-  int32x4_t __ret_190; \
-  __ret_190 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_190), (int32x2_t)(vqrshrun_n_s64(__s1_190, __p2_190)))); \
-  __ret_190; \
-})
-#else
-#define vqrshrun_high_n_s64(__p0_191, __p1_191, __p2_191) __extension__ ({ \
-  int32x2_t __s0_191 = __p0_191; \
-  int64x2_t __s1_191 = __p1_191; \
-  int32x2_t __rev0_191;  __rev0_191 = __builtin_shufflevector(__s0_191, __s0_191, 1, 0); \
-  int64x2_t __rev1_191;  __rev1_191 = __builtin_shufflevector(__s1_191, __s1_191, 1, 0); \
-  int32x4_t __ret_191; \
-  __ret_191 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_191), (int32x2_t)(__noswap_vqrshrun_n_s64(__rev1_191, __p2_191)))); \
-  __ret_191 = __builtin_shufflevector(__ret_191, __ret_191, 3, 2, 1, 0); \
-  __ret_191; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_high_n_s16(__p0_192, __p1_192, __p2_192) __extension__ ({ \
-  int8x8_t __s0_192 = __p0_192; \
-  int16x8_t __s1_192 = __p1_192; \
-  int8x16_t __ret_192; \
-  __ret_192 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_192), (int8x8_t)(vqrshrun_n_s16(__s1_192, __p2_192)))); \
-  __ret_192; \
-})
-#else
-#define vqrshrun_high_n_s16(__p0_193, __p1_193, __p2_193) __extension__ ({ \
-  int8x8_t __s0_193 = __p0_193; \
-  int16x8_t __s1_193 = __p1_193; \
-  int8x8_t __rev0_193;  __rev0_193 = __builtin_shufflevector(__s0_193, __s0_193, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_193;  __rev1_193 = __builtin_shufflevector(__s1_193, __s1_193, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_193; \
-  __ret_193 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_193), (int8x8_t)(__noswap_vqrshrun_n_s16(__rev1_193, __p2_193)))); \
-  __ret_193 = __builtin_shufflevector(__ret_193, __ret_193, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_193; \
-})
-#endif
-
-#define vqrshruns_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqrshruns_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrund_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqrshrund_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrunh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqrshrunh_n_s16(__s0, __p1); \
-  __ret; \
-})
-__ai uint8_t vqshlb_u8(uint8_t __p0, uint8_t __p1) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqshlb_u8(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vqshls_u32(uint32_t __p0, uint32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqshls_u32(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vqshld_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vqshld_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint16_t vqshlh_u16(uint16_t __p0, uint16_t __p1) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqshlh_u16(__p0, __p1);
-  return __ret;
-}
-__ai int8_t vqshlb_s8(int8_t __p0, int8_t __p1) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqshlb_s8(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vqshls_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqshls_s32(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vqshld_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqshld_s64(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqshlh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqshlh_s16(__p0, __p1);
-  return __ret;
-}
-#define vqshlb_n_u8(__p0, __p1) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vqshlb_n_u8(__s0, __p1); \
-  __ret; \
-})
-#define vqshls_n_u32(__p0, __p1) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vqshls_n_u32(__s0, __p1); \
-  __ret; \
-})
-#define vqshld_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vqshld_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vqshlh_n_u16(__p0, __p1) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vqshlh_n_u16(__s0, __p1); \
-  __ret; \
-})
-#define vqshlb_n_s8(__p0, __p1) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqshlb_n_s8(__s0, __p1); \
-  __ret; \
-})
-#define vqshls_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqshls_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqshld_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqshld_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqshlh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqshlh_n_s16(__s0, __p1); \
-  __ret; \
-})
-#define vqshlub_n_s8(__p0, __p1) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqshlub_n_s8(__s0, __p1); \
-  __ret; \
-})
-#define vqshlus_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqshlus_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqshlud_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqshlud_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqshluh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqshluh_n_s16(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_u32(__p0_194, __p1_194, __p2_194) __extension__ ({ \
-  uint16x4_t __s0_194 = __p0_194; \
-  uint32x4_t __s1_194 = __p1_194; \
-  uint16x8_t __ret_194; \
-  __ret_194 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_194), (uint16x4_t)(vqshrn_n_u32(__s1_194, __p2_194)))); \
-  __ret_194; \
-})
-#else
-#define vqshrn_high_n_u32(__p0_195, __p1_195, __p2_195) __extension__ ({ \
-  uint16x4_t __s0_195 = __p0_195; \
-  uint32x4_t __s1_195 = __p1_195; \
-  uint16x4_t __rev0_195;  __rev0_195 = __builtin_shufflevector(__s0_195, __s0_195, 3, 2, 1, 0); \
-  uint32x4_t __rev1_195;  __rev1_195 = __builtin_shufflevector(__s1_195, __s1_195, 3, 2, 1, 0); \
-  uint16x8_t __ret_195; \
-  __ret_195 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_195), (uint16x4_t)(__noswap_vqshrn_n_u32(__rev1_195, __p2_195)))); \
-  __ret_195 = __builtin_shufflevector(__ret_195, __ret_195, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_195; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_u64(__p0_196, __p1_196, __p2_196) __extension__ ({ \
-  uint32x2_t __s0_196 = __p0_196; \
-  uint64x2_t __s1_196 = __p1_196; \
-  uint32x4_t __ret_196; \
-  __ret_196 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_196), (uint32x2_t)(vqshrn_n_u64(__s1_196, __p2_196)))); \
-  __ret_196; \
-})
-#else
-#define vqshrn_high_n_u64(__p0_197, __p1_197, __p2_197) __extension__ ({ \
-  uint32x2_t __s0_197 = __p0_197; \
-  uint64x2_t __s1_197 = __p1_197; \
-  uint32x2_t __rev0_197;  __rev0_197 = __builtin_shufflevector(__s0_197, __s0_197, 1, 0); \
-  uint64x2_t __rev1_197;  __rev1_197 = __builtin_shufflevector(__s1_197, __s1_197, 1, 0); \
-  uint32x4_t __ret_197; \
-  __ret_197 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_197), (uint32x2_t)(__noswap_vqshrn_n_u64(__rev1_197, __p2_197)))); \
-  __ret_197 = __builtin_shufflevector(__ret_197, __ret_197, 3, 2, 1, 0); \
-  __ret_197; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_u16(__p0_198, __p1_198, __p2_198) __extension__ ({ \
-  uint8x8_t __s0_198 = __p0_198; \
-  uint16x8_t __s1_198 = __p1_198; \
-  uint8x16_t __ret_198; \
-  __ret_198 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_198), (uint8x8_t)(vqshrn_n_u16(__s1_198, __p2_198)))); \
-  __ret_198; \
-})
-#else
-#define vqshrn_high_n_u16(__p0_199, __p1_199, __p2_199) __extension__ ({ \
-  uint8x8_t __s0_199 = __p0_199; \
-  uint16x8_t __s1_199 = __p1_199; \
-  uint8x8_t __rev0_199;  __rev0_199 = __builtin_shufflevector(__s0_199, __s0_199, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_199;  __rev1_199 = __builtin_shufflevector(__s1_199, __s1_199, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_199; \
-  __ret_199 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_199), (uint8x8_t)(__noswap_vqshrn_n_u16(__rev1_199, __p2_199)))); \
-  __ret_199 = __builtin_shufflevector(__ret_199, __ret_199, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_199; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_s32(__p0_200, __p1_200, __p2_200) __extension__ ({ \
-  int16x4_t __s0_200 = __p0_200; \
-  int32x4_t __s1_200 = __p1_200; \
-  int16x8_t __ret_200; \
-  __ret_200 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_200), (int16x4_t)(vqshrn_n_s32(__s1_200, __p2_200)))); \
-  __ret_200; \
-})
-#else
-#define vqshrn_high_n_s32(__p0_201, __p1_201, __p2_201) __extension__ ({ \
-  int16x4_t __s0_201 = __p0_201; \
-  int32x4_t __s1_201 = __p1_201; \
-  int16x4_t __rev0_201;  __rev0_201 = __builtin_shufflevector(__s0_201, __s0_201, 3, 2, 1, 0); \
-  int32x4_t __rev1_201;  __rev1_201 = __builtin_shufflevector(__s1_201, __s1_201, 3, 2, 1, 0); \
-  int16x8_t __ret_201; \
-  __ret_201 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_201), (int16x4_t)(__noswap_vqshrn_n_s32(__rev1_201, __p2_201)))); \
-  __ret_201 = __builtin_shufflevector(__ret_201, __ret_201, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_201; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_s64(__p0_202, __p1_202, __p2_202) __extension__ ({ \
-  int32x2_t __s0_202 = __p0_202; \
-  int64x2_t __s1_202 = __p1_202; \
-  int32x4_t __ret_202; \
-  __ret_202 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_202), (int32x2_t)(vqshrn_n_s64(__s1_202, __p2_202)))); \
-  __ret_202; \
-})
-#else
-#define vqshrn_high_n_s64(__p0_203, __p1_203, __p2_203) __extension__ ({ \
-  int32x2_t __s0_203 = __p0_203; \
-  int64x2_t __s1_203 = __p1_203; \
-  int32x2_t __rev0_203;  __rev0_203 = __builtin_shufflevector(__s0_203, __s0_203, 1, 0); \
-  int64x2_t __rev1_203;  __rev1_203 = __builtin_shufflevector(__s1_203, __s1_203, 1, 0); \
-  int32x4_t __ret_203; \
-  __ret_203 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_203), (int32x2_t)(__noswap_vqshrn_n_s64(__rev1_203, __p2_203)))); \
-  __ret_203 = __builtin_shufflevector(__ret_203, __ret_203, 3, 2, 1, 0); \
-  __ret_203; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_s16(__p0_204, __p1_204, __p2_204) __extension__ ({ \
-  int8x8_t __s0_204 = __p0_204; \
-  int16x8_t __s1_204 = __p1_204; \
-  int8x16_t __ret_204; \
-  __ret_204 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_204), (int8x8_t)(vqshrn_n_s16(__s1_204, __p2_204)))); \
-  __ret_204; \
-})
-#else
-#define vqshrn_high_n_s16(__p0_205, __p1_205, __p2_205) __extension__ ({ \
-  int8x8_t __s0_205 = __p0_205; \
-  int16x8_t __s1_205 = __p1_205; \
-  int8x8_t __rev0_205;  __rev0_205 = __builtin_shufflevector(__s0_205, __s0_205, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_205;  __rev1_205 = __builtin_shufflevector(__s1_205, __s1_205, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_205; \
-  __ret_205 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_205), (int8x8_t)(__noswap_vqshrn_n_s16(__rev1_205, __p2_205)))); \
-  __ret_205 = __builtin_shufflevector(__ret_205, __ret_205, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_205; \
-})
-#endif
-
-#define vqshrns_n_u32(__p0, __p1) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vqshrns_n_u32(__s0, __p1); \
-  __ret; \
-})
-#define vqshrnd_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vqshrnd_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vqshrnh_n_u16(__p0, __p1) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vqshrnh_n_u16(__s0, __p1); \
-  __ret; \
-})
-#define vqshrns_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqshrns_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqshrnd_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqshrnd_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqshrnh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqshrnh_n_s16(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_high_n_s32(__p0_206, __p1_206, __p2_206) __extension__ ({ \
-  int16x4_t __s0_206 = __p0_206; \
-  int32x4_t __s1_206 = __p1_206; \
-  int16x8_t __ret_206; \
-  __ret_206 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_206), (int16x4_t)(vqshrun_n_s32(__s1_206, __p2_206)))); \
-  __ret_206; \
-})
-#else
-#define vqshrun_high_n_s32(__p0_207, __p1_207, __p2_207) __extension__ ({ \
-  int16x4_t __s0_207 = __p0_207; \
-  int32x4_t __s1_207 = __p1_207; \
-  int16x4_t __rev0_207;  __rev0_207 = __builtin_shufflevector(__s0_207, __s0_207, 3, 2, 1, 0); \
-  int32x4_t __rev1_207;  __rev1_207 = __builtin_shufflevector(__s1_207, __s1_207, 3, 2, 1, 0); \
-  int16x8_t __ret_207; \
-  __ret_207 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_207), (int16x4_t)(__noswap_vqshrun_n_s32(__rev1_207, __p2_207)))); \
-  __ret_207 = __builtin_shufflevector(__ret_207, __ret_207, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_207; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_high_n_s64(__p0_208, __p1_208, __p2_208) __extension__ ({ \
-  int32x2_t __s0_208 = __p0_208; \
-  int64x2_t __s1_208 = __p1_208; \
-  int32x4_t __ret_208; \
-  __ret_208 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_208), (int32x2_t)(vqshrun_n_s64(__s1_208, __p2_208)))); \
-  __ret_208; \
-})
-#else
-#define vqshrun_high_n_s64(__p0_209, __p1_209, __p2_209) __extension__ ({ \
-  int32x2_t __s0_209 = __p0_209; \
-  int64x2_t __s1_209 = __p1_209; \
-  int32x2_t __rev0_209;  __rev0_209 = __builtin_shufflevector(__s0_209, __s0_209, 1, 0); \
-  int64x2_t __rev1_209;  __rev1_209 = __builtin_shufflevector(__s1_209, __s1_209, 1, 0); \
-  int32x4_t __ret_209; \
-  __ret_209 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_209), (int32x2_t)(__noswap_vqshrun_n_s64(__rev1_209, __p2_209)))); \
-  __ret_209 = __builtin_shufflevector(__ret_209, __ret_209, 3, 2, 1, 0); \
-  __ret_209; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_high_n_s16(__p0_210, __p1_210, __p2_210) __extension__ ({ \
-  int8x8_t __s0_210 = __p0_210; \
-  int16x8_t __s1_210 = __p1_210; \
-  int8x16_t __ret_210; \
-  __ret_210 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_210), (int8x8_t)(vqshrun_n_s16(__s1_210, __p2_210)))); \
-  __ret_210; \
-})
-#else
-#define vqshrun_high_n_s16(__p0_211, __p1_211, __p2_211) __extension__ ({ \
-  int8x8_t __s0_211 = __p0_211; \
-  int16x8_t __s1_211 = __p1_211; \
-  int8x8_t __rev0_211;  __rev0_211 = __builtin_shufflevector(__s0_211, __s0_211, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_211;  __rev1_211 = __builtin_shufflevector(__s1_211, __s1_211, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_211; \
-  __ret_211 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_211), (int8x8_t)(__noswap_vqshrun_n_s16(__rev1_211, __p2_211)))); \
-  __ret_211 = __builtin_shufflevector(__ret_211, __ret_211, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_211; \
-})
-#endif
-
-#define vqshruns_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqshruns_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqshrund_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqshrund_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqshrunh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqshrunh_n_s16(__s0, __p1); \
-  __ret; \
-})
-__ai uint8_t vqsubb_u8(uint8_t __p0, uint8_t __p1) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqsubb_u8(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vqsubs_u32(uint32_t __p0, uint32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqsubs_u32(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vqsubd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vqsubd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint16_t vqsubh_u16(uint16_t __p0, uint16_t __p1) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqsubh_u16(__p0, __p1);
-  return __ret;
-}
-__ai int8_t vqsubb_s8(int8_t __p0, int8_t __p1) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqsubb_s8(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vqsubs_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vqsubd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqsubd_s64(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqsubh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbl1q_s8(int8x16_t __p0, uint8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbl1q_s8(int8x16_t __p0, uint8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) {
-  poly8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) {
-  poly8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) {
-  uint8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) {
-  int8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) {
-  uint8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) {
-  int8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) {
-  poly8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) {
-  poly8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) {
-  uint8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) {
-  int8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) {
-  uint8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) {
-  int8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) {
-  poly8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) {
-  poly8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) {
-  uint8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) {
-  int8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) {
-  uint8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) {
-  int8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vraddhn_u32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vraddhn_u32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vraddhn_u64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vraddhn_u64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vraddhn_u16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vraddhn_u16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vraddhn_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vraddhn_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vraddhn_s64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vraddhn_s64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vraddhn_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vraddhn_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vrbit_p8(poly8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vrbit_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vrbitq_p8(poly8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vrbitq_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrbitq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vrbitq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrbitq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vrbitq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrbit_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrbit_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrbit_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vrbit_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrecpeq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrecpeq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrecpe_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-__ai float64_t vrecped_f64(float64_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vrecped_f64(__p0);
-  return __ret;
-}
-__ai float32_t vrecpes_f32(float32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrecpes_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrecps_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-__ai float64_t vrecpsd_f64(float64_t __p0, float64_t __p1) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vrecpsd_f64(__p0, __p1);
-  return __ret;
-}
-__ai float32_t vrecpss_f32(float32_t __p0, float32_t __p1) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrecpss_f32(__p0, __p1);
-  return __ret;
-}
-__ai float64_t vrecpxd_f64(float64_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vrecpxd_f64(__p0);
-  return __ret;
-}
-__ai float32_t vrecpxs_f32(float32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrecpxs_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vrshld_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vrshld_u64(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vrshld_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vrshld_s64(__p0, __p1);
-  return __ret;
-}
-#define vrshrd_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vrshrd_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vrshrd_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vrshrd_n_s64(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_u32(__p0_212, __p1_212, __p2_212) __extension__ ({ \
-  uint16x4_t __s0_212 = __p0_212; \
-  uint32x4_t __s1_212 = __p1_212; \
-  uint16x8_t __ret_212; \
-  __ret_212 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_212), (uint16x4_t)(vrshrn_n_u32(__s1_212, __p2_212)))); \
-  __ret_212; \
-})
-#else
-#define vrshrn_high_n_u32(__p0_213, __p1_213, __p2_213) __extension__ ({ \
-  uint16x4_t __s0_213 = __p0_213; \
-  uint32x4_t __s1_213 = __p1_213; \
-  uint16x4_t __rev0_213;  __rev0_213 = __builtin_shufflevector(__s0_213, __s0_213, 3, 2, 1, 0); \
-  uint32x4_t __rev1_213;  __rev1_213 = __builtin_shufflevector(__s1_213, __s1_213, 3, 2, 1, 0); \
-  uint16x8_t __ret_213; \
-  __ret_213 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_213), (uint16x4_t)(__noswap_vrshrn_n_u32(__rev1_213, __p2_213)))); \
-  __ret_213 = __builtin_shufflevector(__ret_213, __ret_213, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_213; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_u64(__p0_214, __p1_214, __p2_214) __extension__ ({ \
-  uint32x2_t __s0_214 = __p0_214; \
-  uint64x2_t __s1_214 = __p1_214; \
-  uint32x4_t __ret_214; \
-  __ret_214 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_214), (uint32x2_t)(vrshrn_n_u64(__s1_214, __p2_214)))); \
-  __ret_214; \
-})
-#else
-#define vrshrn_high_n_u64(__p0_215, __p1_215, __p2_215) __extension__ ({ \
-  uint32x2_t __s0_215 = __p0_215; \
-  uint64x2_t __s1_215 = __p1_215; \
-  uint32x2_t __rev0_215;  __rev0_215 = __builtin_shufflevector(__s0_215, __s0_215, 1, 0); \
-  uint64x2_t __rev1_215;  __rev1_215 = __builtin_shufflevector(__s1_215, __s1_215, 1, 0); \
-  uint32x4_t __ret_215; \
-  __ret_215 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_215), (uint32x2_t)(__noswap_vrshrn_n_u64(__rev1_215, __p2_215)))); \
-  __ret_215 = __builtin_shufflevector(__ret_215, __ret_215, 3, 2, 1, 0); \
-  __ret_215; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_u16(__p0_216, __p1_216, __p2_216) __extension__ ({ \
-  uint8x8_t __s0_216 = __p0_216; \
-  uint16x8_t __s1_216 = __p1_216; \
-  uint8x16_t __ret_216; \
-  __ret_216 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_216), (uint8x8_t)(vrshrn_n_u16(__s1_216, __p2_216)))); \
-  __ret_216; \
-})
-#else
-#define vrshrn_high_n_u16(__p0_217, __p1_217, __p2_217) __extension__ ({ \
-  uint8x8_t __s0_217 = __p0_217; \
-  uint16x8_t __s1_217 = __p1_217; \
-  uint8x8_t __rev0_217;  __rev0_217 = __builtin_shufflevector(__s0_217, __s0_217, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_217;  __rev1_217 = __builtin_shufflevector(__s1_217, __s1_217, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_217; \
-  __ret_217 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_217), (uint8x8_t)(__noswap_vrshrn_n_u16(__rev1_217, __p2_217)))); \
-  __ret_217 = __builtin_shufflevector(__ret_217, __ret_217, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_217; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_s32(__p0_218, __p1_218, __p2_218) __extension__ ({ \
-  int16x4_t __s0_218 = __p0_218; \
-  int32x4_t __s1_218 = __p1_218; \
-  int16x8_t __ret_218; \
-  __ret_218 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_218), (int16x4_t)(vrshrn_n_s32(__s1_218, __p2_218)))); \
-  __ret_218; \
-})
-#else
-#define vrshrn_high_n_s32(__p0_219, __p1_219, __p2_219) __extension__ ({ \
-  int16x4_t __s0_219 = __p0_219; \
-  int32x4_t __s1_219 = __p1_219; \
-  int16x4_t __rev0_219;  __rev0_219 = __builtin_shufflevector(__s0_219, __s0_219, 3, 2, 1, 0); \
-  int32x4_t __rev1_219;  __rev1_219 = __builtin_shufflevector(__s1_219, __s1_219, 3, 2, 1, 0); \
-  int16x8_t __ret_219; \
-  __ret_219 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_219), (int16x4_t)(__noswap_vrshrn_n_s32(__rev1_219, __p2_219)))); \
-  __ret_219 = __builtin_shufflevector(__ret_219, __ret_219, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_219; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_s64(__p0_220, __p1_220, __p2_220) __extension__ ({ \
-  int32x2_t __s0_220 = __p0_220; \
-  int64x2_t __s1_220 = __p1_220; \
-  int32x4_t __ret_220; \
-  __ret_220 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_220), (int32x2_t)(vrshrn_n_s64(__s1_220, __p2_220)))); \
-  __ret_220; \
-})
-#else
-#define vrshrn_high_n_s64(__p0_221, __p1_221, __p2_221) __extension__ ({ \
-  int32x2_t __s0_221 = __p0_221; \
-  int64x2_t __s1_221 = __p1_221; \
-  int32x2_t __rev0_221;  __rev0_221 = __builtin_shufflevector(__s0_221, __s0_221, 1, 0); \
-  int64x2_t __rev1_221;  __rev1_221 = __builtin_shufflevector(__s1_221, __s1_221, 1, 0); \
-  int32x4_t __ret_221; \
-  __ret_221 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_221), (int32x2_t)(__noswap_vrshrn_n_s64(__rev1_221, __p2_221)))); \
-  __ret_221 = __builtin_shufflevector(__ret_221, __ret_221, 3, 2, 1, 0); \
-  __ret_221; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_s16(__p0_222, __p1_222, __p2_222) __extension__ ({ \
-  int8x8_t __s0_222 = __p0_222; \
-  int16x8_t __s1_222 = __p1_222; \
-  int8x16_t __ret_222; \
-  __ret_222 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_222), (int8x8_t)(vrshrn_n_s16(__s1_222, __p2_222)))); \
-  __ret_222; \
-})
-#else
-#define vrshrn_high_n_s16(__p0_223, __p1_223, __p2_223) __extension__ ({ \
-  int8x8_t __s0_223 = __p0_223; \
-  int16x8_t __s1_223 = __p1_223; \
-  int8x8_t __rev0_223;  __rev0_223 = __builtin_shufflevector(__s0_223, __s0_223, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_223;  __rev1_223 = __builtin_shufflevector(__s1_223, __s1_223, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_223; \
-  __ret_223 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_223), (int8x8_t)(__noswap_vrshrn_n_s16(__rev1_223, __p2_223)))); \
-  __ret_223 = __builtin_shufflevector(__ret_223, __ret_223, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_223; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrsqrteq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrsqrteq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrsqrte_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-__ai float64_t vrsqrted_f64(float64_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vrsqrted_f64(__p0);
-  return __ret;
-}
-__ai float32_t vrsqrtes_f32(float32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrsqrtes_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrsqrts_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-__ai float64_t vrsqrtsd_f64(float64_t __p0, float64_t __p1) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vrsqrtsd_f64(__p0, __p1);
-  return __ret;
-}
-__ai float32_t vrsqrtss_f32(float32_t __p0, float32_t __p1) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrsqrtss_f32(__p0, __p1);
-  return __ret;
-}
-#define vrsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __s1 = __p1; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vrsrad_n_u64(__s0, __s1, __p2); \
-  __ret; \
-})
-#define vrsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __s1 = __p1; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vrsrad_n_s64(__s0, __s1, __p2); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vrsubhn_u32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vrsubhn_u32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vrsubhn_u64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vrsubhn_u64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vrsubhn_u16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vrsubhn_u16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vrsubhn_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vrsubhn_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vrsubhn_s64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vrsubhn_s64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vrsubhn_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vrsubhn_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#define vset_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64_t __s0 = __p0; \
-  poly64x1_t __s1 = __p1; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (int8x16_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#define vset_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-__ai uint64_t vshld_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vshld_u64(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vshld_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vshld_s64(__p0, __p1);
-  return __ret;
-}
-#define vshld_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vshld_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vshld_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vshld_n_s64(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_u8(__p0_224, __p1_224) __extension__ ({ \
-  uint8x16_t __s0_224 = __p0_224; \
-  uint16x8_t __ret_224; \
-  __ret_224 = (uint16x8_t)(vshll_n_u8(vget_high_u8(__s0_224), __p1_224)); \
-  __ret_224; \
-})
-#else
-#define vshll_high_n_u8(__p0_225, __p1_225) __extension__ ({ \
-  uint8x16_t __s0_225 = __p0_225; \
-  uint8x16_t __rev0_225;  __rev0_225 = __builtin_shufflevector(__s0_225, __s0_225, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_225; \
-  __ret_225 = (uint16x8_t)(__noswap_vshll_n_u8(__noswap_vget_high_u8(__rev0_225), __p1_225)); \
-  __ret_225 = __builtin_shufflevector(__ret_225, __ret_225, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_225; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_u32(__p0_226, __p1_226) __extension__ ({ \
-  uint32x4_t __s0_226 = __p0_226; \
-  uint64x2_t __ret_226; \
-  __ret_226 = (uint64x2_t)(vshll_n_u32(vget_high_u32(__s0_226), __p1_226)); \
-  __ret_226; \
-})
-#else
-#define vshll_high_n_u32(__p0_227, __p1_227) __extension__ ({ \
-  uint32x4_t __s0_227 = __p0_227; \
-  uint32x4_t __rev0_227;  __rev0_227 = __builtin_shufflevector(__s0_227, __s0_227, 3, 2, 1, 0); \
-  uint64x2_t __ret_227; \
-  __ret_227 = (uint64x2_t)(__noswap_vshll_n_u32(__noswap_vget_high_u32(__rev0_227), __p1_227)); \
-  __ret_227 = __builtin_shufflevector(__ret_227, __ret_227, 1, 0); \
-  __ret_227; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_u16(__p0_228, __p1_228) __extension__ ({ \
-  uint16x8_t __s0_228 = __p0_228; \
-  uint32x4_t __ret_228; \
-  __ret_228 = (uint32x4_t)(vshll_n_u16(vget_high_u16(__s0_228), __p1_228)); \
-  __ret_228; \
-})
-#else
-#define vshll_high_n_u16(__p0_229, __p1_229) __extension__ ({ \
-  uint16x8_t __s0_229 = __p0_229; \
-  uint16x8_t __rev0_229;  __rev0_229 = __builtin_shufflevector(__s0_229, __s0_229, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_229; \
-  __ret_229 = (uint32x4_t)(__noswap_vshll_n_u16(__noswap_vget_high_u16(__rev0_229), __p1_229)); \
-  __ret_229 = __builtin_shufflevector(__ret_229, __ret_229, 3, 2, 1, 0); \
-  __ret_229; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_s8(__p0_230, __p1_230) __extension__ ({ \
-  int8x16_t __s0_230 = __p0_230; \
-  int16x8_t __ret_230; \
-  __ret_230 = (int16x8_t)(vshll_n_s8(vget_high_s8(__s0_230), __p1_230)); \
-  __ret_230; \
-})
-#else
-#define vshll_high_n_s8(__p0_231, __p1_231) __extension__ ({ \
-  int8x16_t __s0_231 = __p0_231; \
-  int8x16_t __rev0_231;  __rev0_231 = __builtin_shufflevector(__s0_231, __s0_231, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_231; \
-  __ret_231 = (int16x8_t)(__noswap_vshll_n_s8(__noswap_vget_high_s8(__rev0_231), __p1_231)); \
-  __ret_231 = __builtin_shufflevector(__ret_231, __ret_231, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_231; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_s32(__p0_232, __p1_232) __extension__ ({ \
-  int32x4_t __s0_232 = __p0_232; \
-  int64x2_t __ret_232; \
-  __ret_232 = (int64x2_t)(vshll_n_s32(vget_high_s32(__s0_232), __p1_232)); \
-  __ret_232; \
-})
-#else
-#define vshll_high_n_s32(__p0_233, __p1_233) __extension__ ({ \
-  int32x4_t __s0_233 = __p0_233; \
-  int32x4_t __rev0_233;  __rev0_233 = __builtin_shufflevector(__s0_233, __s0_233, 3, 2, 1, 0); \
-  int64x2_t __ret_233; \
-  __ret_233 = (int64x2_t)(__noswap_vshll_n_s32(__noswap_vget_high_s32(__rev0_233), __p1_233)); \
-  __ret_233 = __builtin_shufflevector(__ret_233, __ret_233, 1, 0); \
-  __ret_233; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_s16(__p0_234, __p1_234) __extension__ ({ \
-  int16x8_t __s0_234 = __p0_234; \
-  int32x4_t __ret_234; \
-  __ret_234 = (int32x4_t)(vshll_n_s16(vget_high_s16(__s0_234), __p1_234)); \
-  __ret_234; \
-})
-#else
-#define vshll_high_n_s16(__p0_235, __p1_235) __extension__ ({ \
-  int16x8_t __s0_235 = __p0_235; \
-  int16x8_t __rev0_235;  __rev0_235 = __builtin_shufflevector(__s0_235, __s0_235, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_235; \
-  __ret_235 = (int32x4_t)(__noswap_vshll_n_s16(__noswap_vget_high_s16(__rev0_235), __p1_235)); \
-  __ret_235 = __builtin_shufflevector(__ret_235, __ret_235, 3, 2, 1, 0); \
-  __ret_235; \
-})
-#endif
-
-#define vshrd_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vshrd_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vshrd_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vshrd_n_s64(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_u32(__p0_236, __p1_236, __p2_236) __extension__ ({ \
-  uint16x4_t __s0_236 = __p0_236; \
-  uint32x4_t __s1_236 = __p1_236; \
-  uint16x8_t __ret_236; \
-  __ret_236 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_236), (uint16x4_t)(vshrn_n_u32(__s1_236, __p2_236)))); \
-  __ret_236; \
-})
-#else
-#define vshrn_high_n_u32(__p0_237, __p1_237, __p2_237) __extension__ ({ \
-  uint16x4_t __s0_237 = __p0_237; \
-  uint32x4_t __s1_237 = __p1_237; \
-  uint16x4_t __rev0_237;  __rev0_237 = __builtin_shufflevector(__s0_237, __s0_237, 3, 2, 1, 0); \
-  uint32x4_t __rev1_237;  __rev1_237 = __builtin_shufflevector(__s1_237, __s1_237, 3, 2, 1, 0); \
-  uint16x8_t __ret_237; \
-  __ret_237 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_237), (uint16x4_t)(__noswap_vshrn_n_u32(__rev1_237, __p2_237)))); \
-  __ret_237 = __builtin_shufflevector(__ret_237, __ret_237, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_237; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_u64(__p0_238, __p1_238, __p2_238) __extension__ ({ \
-  uint32x2_t __s0_238 = __p0_238; \
-  uint64x2_t __s1_238 = __p1_238; \
-  uint32x4_t __ret_238; \
-  __ret_238 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_238), (uint32x2_t)(vshrn_n_u64(__s1_238, __p2_238)))); \
-  __ret_238; \
-})
-#else
-#define vshrn_high_n_u64(__p0_239, __p1_239, __p2_239) __extension__ ({ \
-  uint32x2_t __s0_239 = __p0_239; \
-  uint64x2_t __s1_239 = __p1_239; \
-  uint32x2_t __rev0_239;  __rev0_239 = __builtin_shufflevector(__s0_239, __s0_239, 1, 0); \
-  uint64x2_t __rev1_239;  __rev1_239 = __builtin_shufflevector(__s1_239, __s1_239, 1, 0); \
-  uint32x4_t __ret_239; \
-  __ret_239 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_239), (uint32x2_t)(__noswap_vshrn_n_u64(__rev1_239, __p2_239)))); \
-  __ret_239 = __builtin_shufflevector(__ret_239, __ret_239, 3, 2, 1, 0); \
-  __ret_239; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_u16(__p0_240, __p1_240, __p2_240) __extension__ ({ \
-  uint8x8_t __s0_240 = __p0_240; \
-  uint16x8_t __s1_240 = __p1_240; \
-  uint8x16_t __ret_240; \
-  __ret_240 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_240), (uint8x8_t)(vshrn_n_u16(__s1_240, __p2_240)))); \
-  __ret_240; \
-})
-#else
-#define vshrn_high_n_u16(__p0_241, __p1_241, __p2_241) __extension__ ({ \
-  uint8x8_t __s0_241 = __p0_241; \
-  uint16x8_t __s1_241 = __p1_241; \
-  uint8x8_t __rev0_241;  __rev0_241 = __builtin_shufflevector(__s0_241, __s0_241, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_241;  __rev1_241 = __builtin_shufflevector(__s1_241, __s1_241, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_241; \
-  __ret_241 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_241), (uint8x8_t)(__noswap_vshrn_n_u16(__rev1_241, __p2_241)))); \
-  __ret_241 = __builtin_shufflevector(__ret_241, __ret_241, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_241; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_s32(__p0_242, __p1_242, __p2_242) __extension__ ({ \
-  int16x4_t __s0_242 = __p0_242; \
-  int32x4_t __s1_242 = __p1_242; \
-  int16x8_t __ret_242; \
-  __ret_242 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_242), (int16x4_t)(vshrn_n_s32(__s1_242, __p2_242)))); \
-  __ret_242; \
-})
-#else
-#define vshrn_high_n_s32(__p0_243, __p1_243, __p2_243) __extension__ ({ \
-  int16x4_t __s0_243 = __p0_243; \
-  int32x4_t __s1_243 = __p1_243; \
-  int16x4_t __rev0_243;  __rev0_243 = __builtin_shufflevector(__s0_243, __s0_243, 3, 2, 1, 0); \
-  int32x4_t __rev1_243;  __rev1_243 = __builtin_shufflevector(__s1_243, __s1_243, 3, 2, 1, 0); \
-  int16x8_t __ret_243; \
-  __ret_243 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_243), (int16x4_t)(__noswap_vshrn_n_s32(__rev1_243, __p2_243)))); \
-  __ret_243 = __builtin_shufflevector(__ret_243, __ret_243, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_243; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_s64(__p0_244, __p1_244, __p2_244) __extension__ ({ \
-  int32x2_t __s0_244 = __p0_244; \
-  int64x2_t __s1_244 = __p1_244; \
-  int32x4_t __ret_244; \
-  __ret_244 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_244), (int32x2_t)(vshrn_n_s64(__s1_244, __p2_244)))); \
-  __ret_244; \
-})
-#else
-#define vshrn_high_n_s64(__p0_245, __p1_245, __p2_245) __extension__ ({ \
-  int32x2_t __s0_245 = __p0_245; \
-  int64x2_t __s1_245 = __p1_245; \
-  int32x2_t __rev0_245;  __rev0_245 = __builtin_shufflevector(__s0_245, __s0_245, 1, 0); \
-  int64x2_t __rev1_245;  __rev1_245 = __builtin_shufflevector(__s1_245, __s1_245, 1, 0); \
-  int32x4_t __ret_245; \
-  __ret_245 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_245), (int32x2_t)(__noswap_vshrn_n_s64(__rev1_245, __p2_245)))); \
-  __ret_245 = __builtin_shufflevector(__ret_245, __ret_245, 3, 2, 1, 0); \
-  __ret_245; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_s16(__p0_246, __p1_246, __p2_246) __extension__ ({ \
-  int8x8_t __s0_246 = __p0_246; \
-  int16x8_t __s1_246 = __p1_246; \
-  int8x16_t __ret_246; \
-  __ret_246 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_246), (int8x8_t)(vshrn_n_s16(__s1_246, __p2_246)))); \
-  __ret_246; \
-})
-#else
-#define vshrn_high_n_s16(__p0_247, __p1_247, __p2_247) __extension__ ({ \
-  int8x8_t __s0_247 = __p0_247; \
-  int16x8_t __s1_247 = __p1_247; \
-  int8x8_t __rev0_247;  __rev0_247 = __builtin_shufflevector(__s0_247, __s0_247, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_247;  __rev1_247 = __builtin_shufflevector(__s1_247, __s1_247, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_247; \
-  __ret_247 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_247), (int8x8_t)(__noswap_vshrn_n_s16(__rev1_247, __p2_247)))); \
-  __ret_247 = __builtin_shufflevector(__ret_247, __ret_247, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_247; \
-})
-#endif
-
-#define vslid_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __s1 = __p1; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vslid_n_u64(__s0, __s1, __p2); \
-  __ret; \
-})
-#define vslid_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __s1 = __p1; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vslid_n_s64(__s0, __s1, __p2); \
-  __ret; \
-})
-#define vsli_n_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x1_t __s1 = __p1; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
-  __ret; \
-})
-#else
-#define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-__ai uint8_t vsqaddb_u8(uint8_t __p0, int8_t __p1) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vsqaddb_u8(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vsqadds_u32(uint32_t __p0, int32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vsqadds_u32(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vsqaddd_u64(uint64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vsqaddd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint16_t vsqaddh_u16(uint16_t __p0, int16_t __p1) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vsqaddh_u16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vsqadd_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vsqadd_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vsqadd_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vsqadd_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vsqadd_u64(uint64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vsqadd_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vsqadd_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vsqrtq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vsqrtq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vsqrtq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vsqrtq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vsqrt_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vsqrt_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vsqrt_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#define vsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __s1 = __p1; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vsrad_n_u64(__s0, __s1, __p2); \
-  __ret; \
-})
-#define vsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __s1 = __p1; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vsrad_n_s64(__s0, __s1, __p2); \
-  __ret; \
-})
-#define vsrid_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __s1 = __p1; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vsrid_n_u64(__s0, __s1, __p2); \
-  __ret; \
-})
-#define vsrid_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __s1 = __p1; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vsrid_n_s64(__s0, __s1, __p2); \
-  __ret; \
-})
-#define vsri_n_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x1_t __s1 = __p1; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
-  __ret; \
-})
-#else
-#define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vst1_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 38); \
-})
-#else
-#define vst1q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 42); \
-})
-#else
-#define vst1q_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 42); \
-})
-#endif
-
-#define vst1_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 10); \
-})
-#define vst1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \
-})
-#else
-#define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \
-})
-#else
-#define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \
-})
-#endif
-
-#define vst1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \
-})
-#define vst1_p64_x2(__p0, __p1) __extension__ ({ \
-  poly64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p64_x2(__p0, __p1) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \
-})
-#else
-#define vst1q_p64_x2(__p0, __p1) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  poly64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f64_x2(__p0, __p1) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 42); \
-})
-#else
-#define vst1q_f64_x2(__p0, __p1) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  float64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 42); \
-})
-#endif
-
-#define vst1_f64_x2(__p0, __p1) __extension__ ({ \
-  float64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 10); \
-})
-#define vst1_p64_x3(__p0, __p1) __extension__ ({ \
-  poly64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p64_x3(__p0, __p1) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \
-})
-#else
-#define vst1q_p64_x3(__p0, __p1) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  poly64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f64_x3(__p0, __p1) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 42); \
-})
-#else
-#define vst1q_f64_x3(__p0, __p1) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  float64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 42); \
-})
-#endif
-
-#define vst1_f64_x3(__p0, __p1) __extension__ ({ \
-  float64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 10); \
-})
-#define vst1_p64_x4(__p0, __p1) __extension__ ({ \
-  poly64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p64_x4(__p0, __p1) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 38); \
-})
-#else
-#define vst1q_p64_x4(__p0, __p1) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  poly64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f64_x4(__p0, __p1) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 42); \
-})
-#else
-#define vst1q_f64_x4(__p0, __p1) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  float64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 42); \
-})
-#endif
-
-#define vst1_f64_x4(__p0, __p1) __extension__ ({ \
-  float64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 10); \
-})
-#define vst2_p64(__p0, __p1) __extension__ ({ \
-  poly64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \
-})
-#else
-#define vst2q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  poly64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \
-})
-#else
-#define vst2q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  uint64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_f64(__p0, __p1) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 42); \
-})
-#else
-#define vst2q_f64(__p0, __p1) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  float64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 42); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_s64(__p0, __p1) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 35); \
-})
-#else
-#define vst2q_s64(__p0, __p1) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  int64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 35); \
-})
-#endif
-
-#define vst2_f64(__p0, __p1) __extension__ ({ \
-  float64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 10); \
-})
-#define vst2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \
-})
-#else
-#define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  poly8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \
-})
-#else
-#define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  poly64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \
-})
-#else
-#define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  uint8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \
-})
-#else
-#define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  uint64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \
-})
-#else
-#define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  int8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 42); \
-})
-#else
-#define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  float64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 42); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 35); \
-})
-#else
-#define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  int64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 35); \
-})
-#endif
-
-#define vst2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \
-})
-#define vst2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 10); \
-})
-#define vst2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 3); \
-})
-#define vst3_p64(__p0, __p1) __extension__ ({ \
-  poly64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \
-})
-#else
-#define vst3q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  poly64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \
-})
-#else
-#define vst3q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  uint64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_f64(__p0, __p1) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 42); \
-})
-#else
-#define vst3q_f64(__p0, __p1) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  float64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 42); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_s64(__p0, __p1) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 35); \
-})
-#else
-#define vst3q_s64(__p0, __p1) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  int64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 35); \
-})
-#endif
-
-#define vst3_f64(__p0, __p1) __extension__ ({ \
-  float64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 10); \
-})
-#define vst3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \
-})
-#else
-#define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  poly8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \
-})
-#else
-#define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  poly64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \
-})
-#else
-#define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  uint8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \
-})
-#else
-#define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  uint64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \
-})
-#else
-#define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  int8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 42); \
-})
-#else
-#define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  float64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 42); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 35); \
-})
-#else
-#define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  int64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 35); \
-})
-#endif
-
-#define vst3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \
-})
-#define vst3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 10); \
-})
-#define vst3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 3); \
-})
-#define vst4_p64(__p0, __p1) __extension__ ({ \
-  poly64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 38); \
-})
-#else
-#define vst4q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  poly64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \
-})
-#else
-#define vst4q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  uint64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_f64(__p0, __p1) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 42); \
-})
-#else
-#define vst4q_f64(__p0, __p1) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  float64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 42); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_s64(__p0, __p1) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 35); \
-})
-#else
-#define vst4q_s64(__p0, __p1) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  int64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 35); \
-})
-#endif
-
-#define vst4_f64(__p0, __p1) __extension__ ({ \
-  float64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 10); \
-})
-#define vst4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \
-})
-#else
-#define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  poly8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \
-})
-#else
-#define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  poly64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \
-})
-#else
-#define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  uint8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \
-})
-#else
-#define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  uint64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \
-})
-#else
-#define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  int8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 42); \
-})
-#else
-#define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  float64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 42); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 35); \
-})
-#else
-#define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  int64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 35); \
-})
-#endif
-
-#define vst4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \
-})
-#define vst4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 10); \
-})
-#define vst4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 3); \
-})
-#define vstrq_p128(__p0, __p1) __extension__ ({ \
-  poly128_t __s1 = __p1; \
-  __builtin_neon_vstrq_p128(__p0, __s1); \
-})
-__ai uint64_t vsubd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vsubd_u64(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vsubd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vsubd_s64(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vsub_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vsubhn_u32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vsubhn_u32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vsubhn_u64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vsubhn_u64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vsubhn_u16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vsubhn_u16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vsubhn_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vsubhn_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vsubhn_s64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vsubhn_s64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vsubhn_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vsubhn_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = vmovl_high_u8(__p0) - vmovl_high_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmovl_high_u8(__rev0) - __noswap_vmovl_high_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmovl_high_u32(__p0) - vmovl_high_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmovl_high_u32(__rev0) - __noswap_vmovl_high_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmovl_high_u16(__p0) - vmovl_high_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmovl_high_u16(__rev0) - __noswap_vmovl_high_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = vmovl_high_s8(__p0) - vmovl_high_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmovl_high_s8(__rev0) - __noswap_vmovl_high_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = vmovl_high_s32(__p0) - vmovl_high_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmovl_high_s32(__rev0) - __noswap_vmovl_high_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = vmovl_high_s16(__p0) - vmovl_high_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmovl_high_s16(__rev0) - __noswap_vmovl_high_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 - vmovl_high_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 - __noswap_vmovl_high_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 - vmovl_high_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 - __noswap_vmovl_high_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 - vmovl_high_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 - __noswap_vmovl_high_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 - vmovl_high_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 - __noswap_vmovl_high_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 - vmovl_high_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 - __noswap_vmovl_high_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 - vmovl_high_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 - __noswap_vmovl_high_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
-  return __ret;
-}
-#else
-__ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
-  return __ret;
-}
-#else
-__ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
-  return __ret;
-}
-#else
-__ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
-  return __ret;
-}
-#else
-__ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
-  return __ret;
-}
-#else
-__ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
-  return __ret;
-}
-#else
-__ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vtst_p64(poly64x1_t __p0, poly64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vtst_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64x1_t vtst_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64_t vtstd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vtstd_u64(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vtstd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vtstd_s64(__p0, __p1);
-  return __ret;
-}
-__ai int8_t vuqaddb_s8(int8_t __p0, uint8_t __p1) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vuqaddb_s8(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vuqadds_s32(int32_t __p0, uint32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vuqadds_s32(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vuqaddd_s64(int64_t __p0, uint64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vuqaddd_s64(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vuqaddh_s16(int16_t __p0, uint16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vuqaddh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vuqaddq_s8(int8x16_t __p0, uint8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vuqaddq_s8(int8x16_t __p0, uint8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vuqaddq_s32(int32x4_t __p0, uint32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vuqaddq_s32(int32x4_t __p0, uint32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vuqaddq_s64(int64x2_t __p0, uint64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vuqaddq_s64(int64x2_t __p0, uint64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vuqaddq_s16(int16x8_t __p0, uint16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vuqaddq_s16(int16x8_t __p0, uint16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vuqadd_s8(int8x8_t __p0, uint8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vuqadd_s8(int8x8_t __p0, uint8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vuqadd_s32(int32x2_t __p0, uint32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vuqadd_s32(int32x2_t __p0, uint32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vuqadd_s64(int64x1_t __p0, uint64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vuqadd_s16(int16x4_t __p0, uint16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vuqadd_s16(int16x4_t __p0, uint16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
-  return __ret;
-}
-#else
-__ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
-  return __ret;
-}
-#else
-__ai uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
-  return __ret;
-}
-#else
-__ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
-  return __ret;
-}
-#else
-__ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
-  return __ret;
-}
-#else
-__ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
-  return __ret;
-}
-#else
-__ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
-  return __ret;
-}
-#else
-__ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
-  return __ret;
-}
-#else
-__ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
-  return __ret;
-}
-#else
-__ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
-  return __ret;
-}
-#else
-__ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
-  return __ret;
-}
-#else
-__ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
-  return __ret;
-}
-#else
-__ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = __p0 + vabdq_u8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 + __noswap_vabdq_u8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + vabdq_u32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __noswap_vabdq_u32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 + vabdq_u16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __noswap_vabdq_u16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = __p0 + vabdq_s8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 + __noswap_vabdq_s8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + vabdq_s32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __noswap_vabdq_s32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 + vabdq_s16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __noswap_vabdq_s16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = __p0 + vabd_u8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 + __noswap_vabd_u8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __ret;
-  __ret = __p0 + vabd_u32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 + __noswap_vabd_u32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __ret;
-  __ret = __p0 + vabd_u16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 + __noswap_vabd_u16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = __p0 + vabd_s8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 + __noswap_vabd_s8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __ret;
-  __ret = __p0 + vabd_s32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 + __noswap_vabd_s32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __ret;
-  __ret = __p0 + vabd_s16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 + __noswap_vabd_s16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(vmovl_u8((uint8x8_t)(vabd_u8(__p0, __p1))));
-  return __ret;
-}
-#else
-__ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__rev0, __rev1))));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__p0, __p1))));
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(vmovl_u32((uint32x2_t)(vabd_u32(__p0, __p1))));
-  return __ret;
-}
-#else
-__ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__rev0, __rev1))));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__p0, __p1))));
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(vmovl_u16((uint16x4_t)(vabd_u16(__p0, __p1))));
-  return __ret;
-}
-#else
-__ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__rev0, __rev1))));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__p0, __p1))));
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(vmovl_u8((uint8x8_t)(vabd_s8(__p0, __p1))));
-  return __ret;
-}
-#else
-__ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__rev0, __rev1))));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vabdl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__p0, __p1))));
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(vmovl_u32((uint32x2_t)(vabd_s32(__p0, __p1))));
-  return __ret;
-}
-#else
-__ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__rev0, __rev1))));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vabdl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__p0, __p1))));
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(vmovl_u16((uint16x4_t)(vabd_s16(__p0, __p1))));
-  return __ret;
-}
-#else
-__ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__rev0, __rev1))));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vabdl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__p0, __p1))));
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = vmovl_u8(__p0) + vmovl_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmovl_u8(__rev0) + __noswap_vmovl_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmovl_u32(__p0) + vmovl_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmovl_u32(__rev0) + __noswap_vmovl_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmovl_u16(__p0) + vmovl_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmovl_u16(__rev0) + __noswap_vmovl_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = vmovl_s8(__p0) + vmovl_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmovl_s8(__rev0) + __noswap_vmovl_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = vmovl_s32(__p0) + vmovl_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmovl_s32(__rev0) + __noswap_vmovl_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = vmovl_s16(__p0) + vmovl_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmovl_s16(__rev0) + __noswap_vmovl_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 + vmovl_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __noswap_vmovl_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 + vmovl_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 + __noswap_vmovl_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 + vmovl_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __noswap_vmovl_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 + vmovl_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __noswap_vmovl_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 + vmovl_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 + __noswap_vmovl_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 + vmovl_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __noswap_vmovl_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_f16(__p0_248, __p1_248) __extension__ ({ \
-  float16x4_t __s0_248 = __p0_248; \
-  float16_t __ret_248; \
-float16x4_t __reint_248 = __s0_248; \
-int16_t __reint1_248 = vget_lane_s16(*(int16x4_t *) &__reint_248, __p1_248); \
-  __ret_248 = *(float16_t *) &__reint1_248; \
-  __ret_248; \
-})
-#else
-#define vget_lane_f16(__p0_249, __p1_249) __extension__ ({ \
-  float16x4_t __s0_249 = __p0_249; \
-  float16x4_t __rev0_249;  __rev0_249 = __builtin_shufflevector(__s0_249, __s0_249, 3, 2, 1, 0); \
-  float16_t __ret_249; \
-float16x4_t __reint_249 = __rev0_249; \
-int16_t __reint1_249 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_249, __p1_249); \
-  __ret_249 = *(float16_t *) &__reint1_249; \
-  __ret_249; \
-})
-#define __noswap_vget_lane_f16(__p0_250, __p1_250) __extension__ ({ \
-  float16x4_t __s0_250 = __p0_250; \
-  float16_t __ret_250; \
-float16x4_t __reint_250 = __s0_250; \
-int16_t __reint1_250 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_250, __p1_250); \
-  __ret_250 = *(float16_t *) &__reint1_250; \
-  __ret_250; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_f16(__p0_251, __p1_251) __extension__ ({ \
-  float16x8_t __s0_251 = __p0_251; \
-  float16_t __ret_251; \
-float16x8_t __reint_251 = __s0_251; \
-int16_t __reint1_251 = vgetq_lane_s16(*(int16x8_t *) &__reint_251, __p1_251); \
-  __ret_251 = *(float16_t *) &__reint1_251; \
-  __ret_251; \
-})
-#else
-#define vgetq_lane_f16(__p0_252, __p1_252) __extension__ ({ \
-  float16x8_t __s0_252 = __p0_252; \
-  float16x8_t __rev0_252;  __rev0_252 = __builtin_shufflevector(__s0_252, __s0_252, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret_252; \
-float16x8_t __reint_252 = __rev0_252; \
-int16_t __reint1_252 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_252, __p1_252); \
-  __ret_252 = *(float16_t *) &__reint1_252; \
-  __ret_252; \
-})
-#define __noswap_vgetq_lane_f16(__p0_253, __p1_253) __extension__ ({ \
-  float16x8_t __s0_253 = __p0_253; \
-  float16_t __ret_253; \
-float16x8_t __reint_253 = __s0_253; \
-int16_t __reint1_253 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_253, __p1_253); \
-  __ret_253 = *(float16_t *) &__reint1_253; \
-  __ret_253; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 + vmull_u8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __noswap_vmull_u8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 + __noswap_vmull_u8(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 + vmull_u32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 + __noswap_vmull_u32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 + __noswap_vmull_u32(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + vmull_u16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __noswap_vmull_u16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + __noswap_vmull_u16(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 + vmull_s8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __noswap_vmull_s8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 + __noswap_vmull_s8(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 + vmull_s32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 + __noswap_vmull_s32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 + __noswap_vmull_s32(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + vmull_s16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __noswap_vmull_s16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + __noswap_vmull_s16(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint64x2_t __ret; \
-  __ret = __s0 + vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlal_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __rev0 + __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 + vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlal_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 + __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = __s0 + vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __rev0 + __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 + vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 + __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 + vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 + __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 + __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 + vmull_s32(__p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 + __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 + __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 - vmull_u8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 - __noswap_vmull_u8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 - __noswap_vmull_u8(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 - vmull_u32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 - __noswap_vmull_u32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 - __noswap_vmull_u32(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 - vmull_u16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 - __noswap_vmull_u16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 - __noswap_vmull_u16(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 - vmull_s8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 - __noswap_vmull_s8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 - __noswap_vmull_s8(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 - vmull_s32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 - __noswap_vmull_s32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 - __noswap_vmull_s32(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 - vmull_s16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 - __noswap_vmull_s16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 - __noswap_vmull_s16(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint64x2_t __ret; \
-  __ret = __s0 - vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlsl_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __s2 = __p2; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = __rev0 - __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint32x4_t __ret; \
-  __ret = __s0 - vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlsl_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __s2 = __p2; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = __rev0 - __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __ret; \
-  __ret = __s0 - vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64x2_t __ret; \
-  __ret = __rev0 - __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __ret; \
-  __ret = __s0 - vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
-  __ret; \
-})
-#else
-#define vmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = __rev0 - __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 - vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 - __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 - __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 - vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 - __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 - __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 - vmull_s32(__p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 - __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 - __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 - vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 - __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 - __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_f16(__p0_254, __p1_254, __p2_254) __extension__ ({ \
-  float16_t __s0_254 = __p0_254; \
-  float16x4_t __s1_254 = __p1_254; \
-  float16x4_t __ret_254; \
-float16_t __reint_254 = __s0_254; \
-float16x4_t __reint1_254 = __s1_254; \
-int16x4_t __reint2_254 = vset_lane_s16(*(int16_t *) &__reint_254, *(int16x4_t *) &__reint1_254, __p2_254); \
-  __ret_254 = *(float16x4_t *) &__reint2_254; \
-  __ret_254; \
-})
-#else
-#define vset_lane_f16(__p0_255, __p1_255, __p2_255) __extension__ ({ \
-  float16_t __s0_255 = __p0_255; \
-  float16x4_t __s1_255 = __p1_255; \
-  float16x4_t __rev1_255;  __rev1_255 = __builtin_shufflevector(__s1_255, __s1_255, 3, 2, 1, 0); \
-  float16x4_t __ret_255; \
-float16_t __reint_255 = __s0_255; \
-float16x4_t __reint1_255 = __rev1_255; \
-int16x4_t __reint2_255 = __noswap_vset_lane_s16(*(int16_t *) &__reint_255, *(int16x4_t *) &__reint1_255, __p2_255); \
-  __ret_255 = *(float16x4_t *) &__reint2_255; \
-  __ret_255 = __builtin_shufflevector(__ret_255, __ret_255, 3, 2, 1, 0); \
-  __ret_255; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_f16(__p0_256, __p1_256, __p2_256) __extension__ ({ \
-  float16_t __s0_256 = __p0_256; \
-  float16x8_t __s1_256 = __p1_256; \
-  float16x8_t __ret_256; \
-float16_t __reint_256 = __s0_256; \
-float16x8_t __reint1_256 = __s1_256; \
-int16x8_t __reint2_256 = vsetq_lane_s16(*(int16_t *) &__reint_256, *(int16x8_t *) &__reint1_256, __p2_256); \
-  __ret_256 = *(float16x8_t *) &__reint2_256; \
-  __ret_256; \
-})
-#else
-#define vsetq_lane_f16(__p0_257, __p1_257, __p2_257) __extension__ ({ \
-  float16_t __s0_257 = __p0_257; \
-  float16x8_t __s1_257 = __p1_257; \
-  float16x8_t __rev1_257;  __rev1_257 = __builtin_shufflevector(__s1_257, __s1_257, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_257; \
-float16_t __reint_257 = __s0_257; \
-float16x8_t __reint1_257 = __rev1_257; \
-int16x8_t __reint2_257 = __noswap_vsetq_lane_s16(*(int16_t *) &__reint_257, *(int16x8_t *) &__reint1_257, __p2_257); \
-  __ret_257 = *(float16x8_t *) &__reint2_257; \
-  __ret_257 = __builtin_shufflevector(__ret_257, __ret_257, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_257; \
-})
-#endif
-
-#if defined(__ARM_FEATURE_FP16FML) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-#define vfmlalq_lane_high_f16(__p0_258, __p1_258, __p2_258, __p3_258) __extension__ ({ \
-  float32x4_t __s0_258 = __p0_258; \
-  float16x8_t __s1_258 = __p1_258; \
-  float16x4_t __s2_258 = __p2_258; \
-  float32x4_t __ret_258; \
-  __ret_258 = vfmlalq_high_f16(__s0_258, __s1_258, (float16x8_t) {vget_lane_f16(__s2_258, __p3_258), vget_lane_f16(__s2_258, __p3_258), vget_lane_f16(__s2_258, __p3_258), vget_lane_f16(__s2_258, __p3_258), vget_lane_f16(__s2_258, __p3_258), vget_lane_f16(__s2_258, __p3_258), vget_lane_f16(__s2_258, __p3_258), vget_lane_f16(__s2_258, __p3_258)}); \
-  __ret_258; \
-})
-#else
-#define vfmlalq_lane_high_f16(__p0_259, __p1_259, __p2_259, __p3_259) __extension__ ({ \
-  float32x4_t __s0_259 = __p0_259; \
-  float16x8_t __s1_259 = __p1_259; \
-  float16x4_t __s2_259 = __p2_259; \
-  float32x4_t __rev0_259;  __rev0_259 = __builtin_shufflevector(__s0_259, __s0_259, 3, 2, 1, 0); \
-  float16x8_t __rev1_259;  __rev1_259 = __builtin_shufflevector(__s1_259, __s1_259, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_259;  __rev2_259 = __builtin_shufflevector(__s2_259, __s2_259, 3, 2, 1, 0); \
-  float32x4_t __ret_259; \
-  __ret_259 = __noswap_vfmlalq_high_f16(__rev0_259, __rev1_259, (float16x8_t) {__noswap_vget_lane_f16(__rev2_259, __p3_259), __noswap_vget_lane_f16(__rev2_259, __p3_259), __noswap_vget_lane_f16(__rev2_259, __p3_259), __noswap_vget_lane_f16(__rev2_259, __p3_259), __noswap_vget_lane_f16(__rev2_259, __p3_259), __noswap_vget_lane_f16(__rev2_259, __p3_259), __noswap_vget_lane_f16(__rev2_259, __p3_259), __noswap_vget_lane_f16(__rev2_259, __p3_259)}); \
-  __ret_259 = __builtin_shufflevector(__ret_259, __ret_259, 3, 2, 1, 0); \
-  __ret_259; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlal_lane_high_f16(__p0_260, __p1_260, __p2_260, __p3_260) __extension__ ({ \
-  float32x2_t __s0_260 = __p0_260; \
-  float16x4_t __s1_260 = __p1_260; \
-  float16x4_t __s2_260 = __p2_260; \
-  float32x2_t __ret_260; \
-  __ret_260 = vfmlal_high_f16(__s0_260, __s1_260, (float16x4_t) {vget_lane_f16(__s2_260, __p3_260), vget_lane_f16(__s2_260, __p3_260), vget_lane_f16(__s2_260, __p3_260), vget_lane_f16(__s2_260, __p3_260)}); \
-  __ret_260; \
-})
-#else
-#define vfmlal_lane_high_f16(__p0_261, __p1_261, __p2_261, __p3_261) __extension__ ({ \
-  float32x2_t __s0_261 = __p0_261; \
-  float16x4_t __s1_261 = __p1_261; \
-  float16x4_t __s2_261 = __p2_261; \
-  float32x2_t __rev0_261;  __rev0_261 = __builtin_shufflevector(__s0_261, __s0_261, 1, 0); \
-  float16x4_t __rev1_261;  __rev1_261 = __builtin_shufflevector(__s1_261, __s1_261, 3, 2, 1, 0); \
-  float16x4_t __rev2_261;  __rev2_261 = __builtin_shufflevector(__s2_261, __s2_261, 3, 2, 1, 0); \
-  float32x2_t __ret_261; \
-  __ret_261 = __noswap_vfmlal_high_f16(__rev0_261, __rev1_261, (float16x4_t) {__noswap_vget_lane_f16(__rev2_261, __p3_261), __noswap_vget_lane_f16(__rev2_261, __p3_261), __noswap_vget_lane_f16(__rev2_261, __p3_261), __noswap_vget_lane_f16(__rev2_261, __p3_261)}); \
-  __ret_261 = __builtin_shufflevector(__ret_261, __ret_261, 1, 0); \
-  __ret_261; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlalq_lane_low_f16(__p0_262, __p1_262, __p2_262, __p3_262) __extension__ ({ \
-  float32x4_t __s0_262 = __p0_262; \
-  float16x8_t __s1_262 = __p1_262; \
-  float16x4_t __s2_262 = __p2_262; \
-  float32x4_t __ret_262; \
-  __ret_262 = vfmlalq_low_f16(__s0_262, __s1_262, (float16x8_t) {vget_lane_f16(__s2_262, __p3_262), vget_lane_f16(__s2_262, __p3_262), vget_lane_f16(__s2_262, __p3_262), vget_lane_f16(__s2_262, __p3_262), vget_lane_f16(__s2_262, __p3_262), vget_lane_f16(__s2_262, __p3_262), vget_lane_f16(__s2_262, __p3_262), vget_lane_f16(__s2_262, __p3_262)}); \
-  __ret_262; \
-})
-#else
-#define vfmlalq_lane_low_f16(__p0_263, __p1_263, __p2_263, __p3_263) __extension__ ({ \
-  float32x4_t __s0_263 = __p0_263; \
-  float16x8_t __s1_263 = __p1_263; \
-  float16x4_t __s2_263 = __p2_263; \
-  float32x4_t __rev0_263;  __rev0_263 = __builtin_shufflevector(__s0_263, __s0_263, 3, 2, 1, 0); \
-  float16x8_t __rev1_263;  __rev1_263 = __builtin_shufflevector(__s1_263, __s1_263, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_263;  __rev2_263 = __builtin_shufflevector(__s2_263, __s2_263, 3, 2, 1, 0); \
-  float32x4_t __ret_263; \
-  __ret_263 = __noswap_vfmlalq_low_f16(__rev0_263, __rev1_263, (float16x8_t) {__noswap_vget_lane_f16(__rev2_263, __p3_263), __noswap_vget_lane_f16(__rev2_263, __p3_263), __noswap_vget_lane_f16(__rev2_263, __p3_263), __noswap_vget_lane_f16(__rev2_263, __p3_263), __noswap_vget_lane_f16(__rev2_263, __p3_263), __noswap_vget_lane_f16(__rev2_263, __p3_263), __noswap_vget_lane_f16(__rev2_263, __p3_263), __noswap_vget_lane_f16(__rev2_263, __p3_263)}); \
-  __ret_263 = __builtin_shufflevector(__ret_263, __ret_263, 3, 2, 1, 0); \
-  __ret_263; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlal_lane_low_f16(__p0_264, __p1_264, __p2_264, __p3_264) __extension__ ({ \
-  float32x2_t __s0_264 = __p0_264; \
-  float16x4_t __s1_264 = __p1_264; \
-  float16x4_t __s2_264 = __p2_264; \
-  float32x2_t __ret_264; \
-  __ret_264 = vfmlal_low_f16(__s0_264, __s1_264, (float16x4_t) {vget_lane_f16(__s2_264, __p3_264), vget_lane_f16(__s2_264, __p3_264), vget_lane_f16(__s2_264, __p3_264), vget_lane_f16(__s2_264, __p3_264)}); \
-  __ret_264; \
-})
-#else
-#define vfmlal_lane_low_f16(__p0_265, __p1_265, __p2_265, __p3_265) __extension__ ({ \
-  float32x2_t __s0_265 = __p0_265; \
-  float16x4_t __s1_265 = __p1_265; \
-  float16x4_t __s2_265 = __p2_265; \
-  float32x2_t __rev0_265;  __rev0_265 = __builtin_shufflevector(__s0_265, __s0_265, 1, 0); \
-  float16x4_t __rev1_265;  __rev1_265 = __builtin_shufflevector(__s1_265, __s1_265, 3, 2, 1, 0); \
-  float16x4_t __rev2_265;  __rev2_265 = __builtin_shufflevector(__s2_265, __s2_265, 3, 2, 1, 0); \
-  float32x2_t __ret_265; \
-  __ret_265 = __noswap_vfmlal_low_f16(__rev0_265, __rev1_265, (float16x4_t) {__noswap_vget_lane_f16(__rev2_265, __p3_265), __noswap_vget_lane_f16(__rev2_265, __p3_265), __noswap_vget_lane_f16(__rev2_265, __p3_265), __noswap_vget_lane_f16(__rev2_265, __p3_265)}); \
-  __ret_265 = __builtin_shufflevector(__ret_265, __ret_265, 1, 0); \
-  __ret_265; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlalq_laneq_high_f16(__p0_266, __p1_266, __p2_266, __p3_266) __extension__ ({ \
-  float32x4_t __s0_266 = __p0_266; \
-  float16x8_t __s1_266 = __p1_266; \
-  float16x8_t __s2_266 = __p2_266; \
-  float32x4_t __ret_266; \
-  __ret_266 = vfmlalq_high_f16(__s0_266, __s1_266, (float16x8_t) {vgetq_lane_f16(__s2_266, __p3_266), vgetq_lane_f16(__s2_266, __p3_266), vgetq_lane_f16(__s2_266, __p3_266), vgetq_lane_f16(__s2_266, __p3_266), vgetq_lane_f16(__s2_266, __p3_266), vgetq_lane_f16(__s2_266, __p3_266), vgetq_lane_f16(__s2_266, __p3_266), vgetq_lane_f16(__s2_266, __p3_266)}); \
-  __ret_266; \
-})
-#else
-#define vfmlalq_laneq_high_f16(__p0_267, __p1_267, __p2_267, __p3_267) __extension__ ({ \
-  float32x4_t __s0_267 = __p0_267; \
-  float16x8_t __s1_267 = __p1_267; \
-  float16x8_t __s2_267 = __p2_267; \
-  float32x4_t __rev0_267;  __rev0_267 = __builtin_shufflevector(__s0_267, __s0_267, 3, 2, 1, 0); \
-  float16x8_t __rev1_267;  __rev1_267 = __builtin_shufflevector(__s1_267, __s1_267, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_267;  __rev2_267 = __builtin_shufflevector(__s2_267, __s2_267, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_267; \
-  __ret_267 = __noswap_vfmlalq_high_f16(__rev0_267, __rev1_267, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_267, __p3_267), __noswap_vgetq_lane_f16(__rev2_267, __p3_267), __noswap_vgetq_lane_f16(__rev2_267, __p3_267), __noswap_vgetq_lane_f16(__rev2_267, __p3_267), __noswap_vgetq_lane_f16(__rev2_267, __p3_267), __noswap_vgetq_lane_f16(__rev2_267, __p3_267), __noswap_vgetq_lane_f16(__rev2_267, __p3_267), __noswap_vgetq_lane_f16(__rev2_267, __p3_267)}); \
-  __ret_267 = __builtin_shufflevector(__ret_267, __ret_267, 3, 2, 1, 0); \
-  __ret_267; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlal_laneq_high_f16(__p0_268, __p1_268, __p2_268, __p3_268) __extension__ ({ \
-  float32x2_t __s0_268 = __p0_268; \
-  float16x4_t __s1_268 = __p1_268; \
-  float16x8_t __s2_268 = __p2_268; \
-  float32x2_t __ret_268; \
-  __ret_268 = vfmlal_high_f16(__s0_268, __s1_268, (float16x4_t) {vgetq_lane_f16(__s2_268, __p3_268), vgetq_lane_f16(__s2_268, __p3_268), vgetq_lane_f16(__s2_268, __p3_268), vgetq_lane_f16(__s2_268, __p3_268)}); \
-  __ret_268; \
-})
-#else
-#define vfmlal_laneq_high_f16(__p0_269, __p1_269, __p2_269, __p3_269) __extension__ ({ \
-  float32x2_t __s0_269 = __p0_269; \
-  float16x4_t __s1_269 = __p1_269; \
-  float16x8_t __s2_269 = __p2_269; \
-  float32x2_t __rev0_269;  __rev0_269 = __builtin_shufflevector(__s0_269, __s0_269, 1, 0); \
-  float16x4_t __rev1_269;  __rev1_269 = __builtin_shufflevector(__s1_269, __s1_269, 3, 2, 1, 0); \
-  float16x8_t __rev2_269;  __rev2_269 = __builtin_shufflevector(__s2_269, __s2_269, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_269; \
-  __ret_269 = __noswap_vfmlal_high_f16(__rev0_269, __rev1_269, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_269, __p3_269), __noswap_vgetq_lane_f16(__rev2_269, __p3_269), __noswap_vgetq_lane_f16(__rev2_269, __p3_269), __noswap_vgetq_lane_f16(__rev2_269, __p3_269)}); \
-  __ret_269 = __builtin_shufflevector(__ret_269, __ret_269, 1, 0); \
-  __ret_269; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlalq_laneq_low_f16(__p0_270, __p1_270, __p2_270, __p3_270) __extension__ ({ \
-  float32x4_t __s0_270 = __p0_270; \
-  float16x8_t __s1_270 = __p1_270; \
-  float16x8_t __s2_270 = __p2_270; \
-  float32x4_t __ret_270; \
-  __ret_270 = vfmlalq_low_f16(__s0_270, __s1_270, (float16x8_t) {vgetq_lane_f16(__s2_270, __p3_270), vgetq_lane_f16(__s2_270, __p3_270), vgetq_lane_f16(__s2_270, __p3_270), vgetq_lane_f16(__s2_270, __p3_270), vgetq_lane_f16(__s2_270, __p3_270), vgetq_lane_f16(__s2_270, __p3_270), vgetq_lane_f16(__s2_270, __p3_270), vgetq_lane_f16(__s2_270, __p3_270)}); \
-  __ret_270; \
-})
-#else
-#define vfmlalq_laneq_low_f16(__p0_271, __p1_271, __p2_271, __p3_271) __extension__ ({ \
-  float32x4_t __s0_271 = __p0_271; \
-  float16x8_t __s1_271 = __p1_271; \
-  float16x8_t __s2_271 = __p2_271; \
-  float32x4_t __rev0_271;  __rev0_271 = __builtin_shufflevector(__s0_271, __s0_271, 3, 2, 1, 0); \
-  float16x8_t __rev1_271;  __rev1_271 = __builtin_shufflevector(__s1_271, __s1_271, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_271;  __rev2_271 = __builtin_shufflevector(__s2_271, __s2_271, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_271; \
-  __ret_271 = __noswap_vfmlalq_low_f16(__rev0_271, __rev1_271, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_271, __p3_271), __noswap_vgetq_lane_f16(__rev2_271, __p3_271), __noswap_vgetq_lane_f16(__rev2_271, __p3_271), __noswap_vgetq_lane_f16(__rev2_271, __p3_271), __noswap_vgetq_lane_f16(__rev2_271, __p3_271), __noswap_vgetq_lane_f16(__rev2_271, __p3_271), __noswap_vgetq_lane_f16(__rev2_271, __p3_271), __noswap_vgetq_lane_f16(__rev2_271, __p3_271)}); \
-  __ret_271 = __builtin_shufflevector(__ret_271, __ret_271, 3, 2, 1, 0); \
-  __ret_271; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlal_laneq_low_f16(__p0_272, __p1_272, __p2_272, __p3_272) __extension__ ({ \
-  float32x2_t __s0_272 = __p0_272; \
-  float16x4_t __s1_272 = __p1_272; \
-  float16x8_t __s2_272 = __p2_272; \
-  float32x2_t __ret_272; \
-  __ret_272 = vfmlal_low_f16(__s0_272, __s1_272, (float16x4_t) {vgetq_lane_f16(__s2_272, __p3_272), vgetq_lane_f16(__s2_272, __p3_272), vgetq_lane_f16(__s2_272, __p3_272), vgetq_lane_f16(__s2_272, __p3_272)}); \
-  __ret_272; \
-})
-#else
-#define vfmlal_laneq_low_f16(__p0_273, __p1_273, __p2_273, __p3_273) __extension__ ({ \
-  float32x2_t __s0_273 = __p0_273; \
-  float16x4_t __s1_273 = __p1_273; \
-  float16x8_t __s2_273 = __p2_273; \
-  float32x2_t __rev0_273;  __rev0_273 = __builtin_shufflevector(__s0_273, __s0_273, 1, 0); \
-  float16x4_t __rev1_273;  __rev1_273 = __builtin_shufflevector(__s1_273, __s1_273, 3, 2, 1, 0); \
-  float16x8_t __rev2_273;  __rev2_273 = __builtin_shufflevector(__s2_273, __s2_273, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_273; \
-  __ret_273 = __noswap_vfmlal_low_f16(__rev0_273, __rev1_273, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_273, __p3_273), __noswap_vgetq_lane_f16(__rev2_273, __p3_273), __noswap_vgetq_lane_f16(__rev2_273, __p3_273), __noswap_vgetq_lane_f16(__rev2_273, __p3_273)}); \
-  __ret_273 = __builtin_shufflevector(__ret_273, __ret_273, 1, 0); \
-  __ret_273; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlslq_lane_high_f16(__p0_274, __p1_274, __p2_274, __p3_274) __extension__ ({ \
-  float32x4_t __s0_274 = __p0_274; \
-  float16x8_t __s1_274 = __p1_274; \
-  float16x4_t __s2_274 = __p2_274; \
-  float32x4_t __ret_274; \
-  __ret_274 = vfmlslq_high_f16(__s0_274, __s1_274, (float16x8_t) {vget_lane_f16(__s2_274, __p3_274), vget_lane_f16(__s2_274, __p3_274), vget_lane_f16(__s2_274, __p3_274), vget_lane_f16(__s2_274, __p3_274), vget_lane_f16(__s2_274, __p3_274), vget_lane_f16(__s2_274, __p3_274), vget_lane_f16(__s2_274, __p3_274), vget_lane_f16(__s2_274, __p3_274)}); \
-  __ret_274; \
-})
-#else
-#define vfmlslq_lane_high_f16(__p0_275, __p1_275, __p2_275, __p3_275) __extension__ ({ \
-  float32x4_t __s0_275 = __p0_275; \
-  float16x8_t __s1_275 = __p1_275; \
-  float16x4_t __s2_275 = __p2_275; \
-  float32x4_t __rev0_275;  __rev0_275 = __builtin_shufflevector(__s0_275, __s0_275, 3, 2, 1, 0); \
-  float16x8_t __rev1_275;  __rev1_275 = __builtin_shufflevector(__s1_275, __s1_275, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_275;  __rev2_275 = __builtin_shufflevector(__s2_275, __s2_275, 3, 2, 1, 0); \
-  float32x4_t __ret_275; \
-  __ret_275 = __noswap_vfmlslq_high_f16(__rev0_275, __rev1_275, (float16x8_t) {__noswap_vget_lane_f16(__rev2_275, __p3_275), __noswap_vget_lane_f16(__rev2_275, __p3_275), __noswap_vget_lane_f16(__rev2_275, __p3_275), __noswap_vget_lane_f16(__rev2_275, __p3_275), __noswap_vget_lane_f16(__rev2_275, __p3_275), __noswap_vget_lane_f16(__rev2_275, __p3_275), __noswap_vget_lane_f16(__rev2_275, __p3_275), __noswap_vget_lane_f16(__rev2_275, __p3_275)}); \
-  __ret_275 = __builtin_shufflevector(__ret_275, __ret_275, 3, 2, 1, 0); \
-  __ret_275; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlsl_lane_high_f16(__p0_276, __p1_276, __p2_276, __p3_276) __extension__ ({ \
-  float32x2_t __s0_276 = __p0_276; \
-  float16x4_t __s1_276 = __p1_276; \
-  float16x4_t __s2_276 = __p2_276; \
-  float32x2_t __ret_276; \
-  __ret_276 = vfmlsl_high_f16(__s0_276, __s1_276, (float16x4_t) {vget_lane_f16(__s2_276, __p3_276), vget_lane_f16(__s2_276, __p3_276), vget_lane_f16(__s2_276, __p3_276), vget_lane_f16(__s2_276, __p3_276)}); \
-  __ret_276; \
-})
-#else
-#define vfmlsl_lane_high_f16(__p0_277, __p1_277, __p2_277, __p3_277) __extension__ ({ \
-  float32x2_t __s0_277 = __p0_277; \
-  float16x4_t __s1_277 = __p1_277; \
-  float16x4_t __s2_277 = __p2_277; \
-  float32x2_t __rev0_277;  __rev0_277 = __builtin_shufflevector(__s0_277, __s0_277, 1, 0); \
-  float16x4_t __rev1_277;  __rev1_277 = __builtin_shufflevector(__s1_277, __s1_277, 3, 2, 1, 0); \
-  float16x4_t __rev2_277;  __rev2_277 = __builtin_shufflevector(__s2_277, __s2_277, 3, 2, 1, 0); \
-  float32x2_t __ret_277; \
-  __ret_277 = __noswap_vfmlsl_high_f16(__rev0_277, __rev1_277, (float16x4_t) {__noswap_vget_lane_f16(__rev2_277, __p3_277), __noswap_vget_lane_f16(__rev2_277, __p3_277), __noswap_vget_lane_f16(__rev2_277, __p3_277), __noswap_vget_lane_f16(__rev2_277, __p3_277)}); \
-  __ret_277 = __builtin_shufflevector(__ret_277, __ret_277, 1, 0); \
-  __ret_277; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlslq_lane_low_f16(__p0_278, __p1_278, __p2_278, __p3_278) __extension__ ({ \
-  float32x4_t __s0_278 = __p0_278; \
-  float16x8_t __s1_278 = __p1_278; \
-  float16x4_t __s2_278 = __p2_278; \
-  float32x4_t __ret_278; \
-  __ret_278 = vfmlslq_low_f16(__s0_278, __s1_278, (float16x8_t) {vget_lane_f16(__s2_278, __p3_278), vget_lane_f16(__s2_278, __p3_278), vget_lane_f16(__s2_278, __p3_278), vget_lane_f16(__s2_278, __p3_278), vget_lane_f16(__s2_278, __p3_278), vget_lane_f16(__s2_278, __p3_278), vget_lane_f16(__s2_278, __p3_278), vget_lane_f16(__s2_278, __p3_278)}); \
-  __ret_278; \
-})
-#else
-#define vfmlslq_lane_low_f16(__p0_279, __p1_279, __p2_279, __p3_279) __extension__ ({ \
-  float32x4_t __s0_279 = __p0_279; \
-  float16x8_t __s1_279 = __p1_279; \
-  float16x4_t __s2_279 = __p2_279; \
-  float32x4_t __rev0_279;  __rev0_279 = __builtin_shufflevector(__s0_279, __s0_279, 3, 2, 1, 0); \
-  float16x8_t __rev1_279;  __rev1_279 = __builtin_shufflevector(__s1_279, __s1_279, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_279;  __rev2_279 = __builtin_shufflevector(__s2_279, __s2_279, 3, 2, 1, 0); \
-  float32x4_t __ret_279; \
-  __ret_279 = __noswap_vfmlslq_low_f16(__rev0_279, __rev1_279, (float16x8_t) {__noswap_vget_lane_f16(__rev2_279, __p3_279), __noswap_vget_lane_f16(__rev2_279, __p3_279), __noswap_vget_lane_f16(__rev2_279, __p3_279), __noswap_vget_lane_f16(__rev2_279, __p3_279), __noswap_vget_lane_f16(__rev2_279, __p3_279), __noswap_vget_lane_f16(__rev2_279, __p3_279), __noswap_vget_lane_f16(__rev2_279, __p3_279), __noswap_vget_lane_f16(__rev2_279, __p3_279)}); \
-  __ret_279 = __builtin_shufflevector(__ret_279, __ret_279, 3, 2, 1, 0); \
-  __ret_279; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlsl_lane_low_f16(__p0_280, __p1_280, __p2_280, __p3_280) __extension__ ({ \
-  float32x2_t __s0_280 = __p0_280; \
-  float16x4_t __s1_280 = __p1_280; \
-  float16x4_t __s2_280 = __p2_280; \
-  float32x2_t __ret_280; \
-  __ret_280 = vfmlsl_low_f16(__s0_280, __s1_280, (float16x4_t) {vget_lane_f16(__s2_280, __p3_280), vget_lane_f16(__s2_280, __p3_280), vget_lane_f16(__s2_280, __p3_280), vget_lane_f16(__s2_280, __p3_280)}); \
-  __ret_280; \
-})
-#else
-#define vfmlsl_lane_low_f16(__p0_281, __p1_281, __p2_281, __p3_281) __extension__ ({ \
-  float32x2_t __s0_281 = __p0_281; \
-  float16x4_t __s1_281 = __p1_281; \
-  float16x4_t __s2_281 = __p2_281; \
-  float32x2_t __rev0_281;  __rev0_281 = __builtin_shufflevector(__s0_281, __s0_281, 1, 0); \
-  float16x4_t __rev1_281;  __rev1_281 = __builtin_shufflevector(__s1_281, __s1_281, 3, 2, 1, 0); \
-  float16x4_t __rev2_281;  __rev2_281 = __builtin_shufflevector(__s2_281, __s2_281, 3, 2, 1, 0); \
-  float32x2_t __ret_281; \
-  __ret_281 = __noswap_vfmlsl_low_f16(__rev0_281, __rev1_281, (float16x4_t) {__noswap_vget_lane_f16(__rev2_281, __p3_281), __noswap_vget_lane_f16(__rev2_281, __p3_281), __noswap_vget_lane_f16(__rev2_281, __p3_281), __noswap_vget_lane_f16(__rev2_281, __p3_281)}); \
-  __ret_281 = __builtin_shufflevector(__ret_281, __ret_281, 1, 0); \
-  __ret_281; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlslq_laneq_high_f16(__p0_282, __p1_282, __p2_282, __p3_282) __extension__ ({ \
-  float32x4_t __s0_282 = __p0_282; \
-  float16x8_t __s1_282 = __p1_282; \
-  float16x8_t __s2_282 = __p2_282; \
-  float32x4_t __ret_282; \
-  __ret_282 = vfmlslq_high_f16(__s0_282, __s1_282, (float16x8_t) {vgetq_lane_f16(__s2_282, __p3_282), vgetq_lane_f16(__s2_282, __p3_282), vgetq_lane_f16(__s2_282, __p3_282), vgetq_lane_f16(__s2_282, __p3_282), vgetq_lane_f16(__s2_282, __p3_282), vgetq_lane_f16(__s2_282, __p3_282), vgetq_lane_f16(__s2_282, __p3_282), vgetq_lane_f16(__s2_282, __p3_282)}); \
-  __ret_282; \
-})
-#else
-#define vfmlslq_laneq_high_f16(__p0_283, __p1_283, __p2_283, __p3_283) __extension__ ({ \
-  float32x4_t __s0_283 = __p0_283; \
-  float16x8_t __s1_283 = __p1_283; \
-  float16x8_t __s2_283 = __p2_283; \
-  float32x4_t __rev0_283;  __rev0_283 = __builtin_shufflevector(__s0_283, __s0_283, 3, 2, 1, 0); \
-  float16x8_t __rev1_283;  __rev1_283 = __builtin_shufflevector(__s1_283, __s1_283, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_283;  __rev2_283 = __builtin_shufflevector(__s2_283, __s2_283, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_283; \
-  __ret_283 = __noswap_vfmlslq_high_f16(__rev0_283, __rev1_283, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_283, __p3_283), __noswap_vgetq_lane_f16(__rev2_283, __p3_283), __noswap_vgetq_lane_f16(__rev2_283, __p3_283), __noswap_vgetq_lane_f16(__rev2_283, __p3_283), __noswap_vgetq_lane_f16(__rev2_283, __p3_283), __noswap_vgetq_lane_f16(__rev2_283, __p3_283), __noswap_vgetq_lane_f16(__rev2_283, __p3_283), __noswap_vgetq_lane_f16(__rev2_283, __p3_283)}); \
-  __ret_283 = __builtin_shufflevector(__ret_283, __ret_283, 3, 2, 1, 0); \
-  __ret_283; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlsl_laneq_high_f16(__p0_284, __p1_284, __p2_284, __p3_284) __extension__ ({ \
-  float32x2_t __s0_284 = __p0_284; \
-  float16x4_t __s1_284 = __p1_284; \
-  float16x8_t __s2_284 = __p2_284; \
-  float32x2_t __ret_284; \
-  __ret_284 = vfmlsl_high_f16(__s0_284, __s1_284, (float16x4_t) {vgetq_lane_f16(__s2_284, __p3_284), vgetq_lane_f16(__s2_284, __p3_284), vgetq_lane_f16(__s2_284, __p3_284), vgetq_lane_f16(__s2_284, __p3_284)}); \
-  __ret_284; \
-})
-#else
-#define vfmlsl_laneq_high_f16(__p0_285, __p1_285, __p2_285, __p3_285) __extension__ ({ \
-  float32x2_t __s0_285 = __p0_285; \
-  float16x4_t __s1_285 = __p1_285; \
-  float16x8_t __s2_285 = __p2_285; \
-  float32x2_t __rev0_285;  __rev0_285 = __builtin_shufflevector(__s0_285, __s0_285, 1, 0); \
-  float16x4_t __rev1_285;  __rev1_285 = __builtin_shufflevector(__s1_285, __s1_285, 3, 2, 1, 0); \
-  float16x8_t __rev2_285;  __rev2_285 = __builtin_shufflevector(__s2_285, __s2_285, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_285; \
-  __ret_285 = __noswap_vfmlsl_high_f16(__rev0_285, __rev1_285, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_285, __p3_285), __noswap_vgetq_lane_f16(__rev2_285, __p3_285), __noswap_vgetq_lane_f16(__rev2_285, __p3_285), __noswap_vgetq_lane_f16(__rev2_285, __p3_285)}); \
-  __ret_285 = __builtin_shufflevector(__ret_285, __ret_285, 1, 0); \
-  __ret_285; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlslq_laneq_low_f16(__p0_286, __p1_286, __p2_286, __p3_286) __extension__ ({ \
-  float32x4_t __s0_286 = __p0_286; \
-  float16x8_t __s1_286 = __p1_286; \
-  float16x8_t __s2_286 = __p2_286; \
-  float32x4_t __ret_286; \
-  __ret_286 = vfmlslq_low_f16(__s0_286, __s1_286, (float16x8_t) {vgetq_lane_f16(__s2_286, __p3_286), vgetq_lane_f16(__s2_286, __p3_286), vgetq_lane_f16(__s2_286, __p3_286), vgetq_lane_f16(__s2_286, __p3_286), vgetq_lane_f16(__s2_286, __p3_286), vgetq_lane_f16(__s2_286, __p3_286), vgetq_lane_f16(__s2_286, __p3_286), vgetq_lane_f16(__s2_286, __p3_286)}); \
-  __ret_286; \
-})
-#else
-#define vfmlslq_laneq_low_f16(__p0_287, __p1_287, __p2_287, __p3_287) __extension__ ({ \
-  float32x4_t __s0_287 = __p0_287; \
-  float16x8_t __s1_287 = __p1_287; \
-  float16x8_t __s2_287 = __p2_287; \
-  float32x4_t __rev0_287;  __rev0_287 = __builtin_shufflevector(__s0_287, __s0_287, 3, 2, 1, 0); \
-  float16x8_t __rev1_287;  __rev1_287 = __builtin_shufflevector(__s1_287, __s1_287, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_287;  __rev2_287 = __builtin_shufflevector(__s2_287, __s2_287, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_287; \
-  __ret_287 = __noswap_vfmlslq_low_f16(__rev0_287, __rev1_287, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_287, __p3_287), __noswap_vgetq_lane_f16(__rev2_287, __p3_287), __noswap_vgetq_lane_f16(__rev2_287, __p3_287), __noswap_vgetq_lane_f16(__rev2_287, __p3_287), __noswap_vgetq_lane_f16(__rev2_287, __p3_287), __noswap_vgetq_lane_f16(__rev2_287, __p3_287), __noswap_vgetq_lane_f16(__rev2_287, __p3_287), __noswap_vgetq_lane_f16(__rev2_287, __p3_287)}); \
-  __ret_287 = __builtin_shufflevector(__ret_287, __ret_287, 3, 2, 1, 0); \
-  __ret_287; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlsl_laneq_low_f16(__p0_288, __p1_288, __p2_288, __p3_288) __extension__ ({ \
-  float32x2_t __s0_288 = __p0_288; \
-  float16x4_t __s1_288 = __p1_288; \
-  float16x8_t __s2_288 = __p2_288; \
-  float32x2_t __ret_288; \
-  __ret_288 = vfmlsl_low_f16(__s0_288, __s1_288, (float16x4_t) {vgetq_lane_f16(__s2_288, __p3_288), vgetq_lane_f16(__s2_288, __p3_288), vgetq_lane_f16(__s2_288, __p3_288), vgetq_lane_f16(__s2_288, __p3_288)}); \
-  __ret_288; \
-})
-#else
-#define vfmlsl_laneq_low_f16(__p0_289, __p1_289, __p2_289, __p3_289) __extension__ ({ \
-  float32x2_t __s0_289 = __p0_289; \
-  float16x4_t __s1_289 = __p1_289; \
-  float16x8_t __s2_289 = __p2_289; \
-  float32x2_t __rev0_289;  __rev0_289 = __builtin_shufflevector(__s0_289, __s0_289, 1, 0); \
-  float16x4_t __rev1_289;  __rev1_289 = __builtin_shufflevector(__s1_289, __s1_289, 3, 2, 1, 0); \
-  float16x8_t __rev2_289;  __rev2_289 = __builtin_shufflevector(__s2_289, __s2_289, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_289; \
-  __ret_289 = __noswap_vfmlsl_low_f16(__rev0_289, __rev1_289, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_289, __p3_289), __noswap_vgetq_lane_f16(__rev2_289, __p3_289), __noswap_vgetq_lane_f16(__rev2_289, __p3_289), __noswap_vgetq_lane_f16(__rev2_289, __p3_289)}); \
-  __ret_289 = __builtin_shufflevector(__ret_289, __ret_289, 1, 0); \
-  __ret_289; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-#define vmulh_lane_f16(__p0_290, __p1_290, __p2_290) __extension__ ({ \
-  float16_t __s0_290 = __p0_290; \
-  float16x4_t __s1_290 = __p1_290; \
-  float16_t __ret_290; \
-  __ret_290 = __s0_290 * vget_lane_f16(__s1_290, __p2_290); \
-  __ret_290; \
-})
-#else
-#define vmulh_lane_f16(__p0_291, __p1_291, __p2_291) __extension__ ({ \
-  float16_t __s0_291 = __p0_291; \
-  float16x4_t __s1_291 = __p1_291; \
-  float16x4_t __rev1_291;  __rev1_291 = __builtin_shufflevector(__s1_291, __s1_291, 3, 2, 1, 0); \
-  float16_t __ret_291; \
-  __ret_291 = __s0_291 * __noswap_vget_lane_f16(__rev1_291, __p2_291); \
-  __ret_291; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulh_laneq_f16(__p0_292, __p1_292, __p2_292) __extension__ ({ \
-  float16_t __s0_292 = __p0_292; \
-  float16x8_t __s1_292 = __p1_292; \
-  float16_t __ret_292; \
-  __ret_292 = __s0_292 * vgetq_lane_f16(__s1_292, __p2_292); \
-  __ret_292; \
-})
-#else
-#define vmulh_laneq_f16(__p0_293, __p1_293, __p2_293) __extension__ ({ \
-  float16_t __s0_293 = __p0_293; \
-  float16x8_t __s1_293 = __p1_293; \
-  float16x8_t __rev1_293;  __rev1_293 = __builtin_shufflevector(__s1_293, __s1_293, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret_293; \
-  __ret_293 = __s0_293 * __noswap_vgetq_lane_f16(__rev1_293, __p2_293); \
-  __ret_293; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)
-__ai int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
-  int32_t __ret;
-  __ret = vqadds_s32(__p0, vqrdmulhs_s32(__p1, __p2));
-  return __ret;
-}
-__ai int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
-  int16_t __ret;
-  __ret = vqaddh_s16(__p0, vqrdmulhh_s16(__p1, __p2));
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahs_lane_s32(__p0_294, __p1_294, __p2_294, __p3_294) __extension__ ({ \
-  int32_t __s0_294 = __p0_294; \
-  int32_t __s1_294 = __p1_294; \
-  int32x2_t __s2_294 = __p2_294; \
-  int32_t __ret_294; \
-  __ret_294 = vqadds_s32(__s0_294, vqrdmulhs_s32(__s1_294, vget_lane_s32(__s2_294, __p3_294))); \
-  __ret_294; \
-})
-#else
-#define vqrdmlahs_lane_s32(__p0_295, __p1_295, __p2_295, __p3_295) __extension__ ({ \
-  int32_t __s0_295 = __p0_295; \
-  int32_t __s1_295 = __p1_295; \
-  int32x2_t __s2_295 = __p2_295; \
-  int32x2_t __rev2_295;  __rev2_295 = __builtin_shufflevector(__s2_295, __s2_295, 1, 0); \
-  int32_t __ret_295; \
-  __ret_295 = vqadds_s32(__s0_295, vqrdmulhs_s32(__s1_295, __noswap_vget_lane_s32(__rev2_295, __p3_295))); \
-  __ret_295; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahh_lane_s16(__p0_296, __p1_296, __p2_296, __p3_296) __extension__ ({ \
-  int16_t __s0_296 = __p0_296; \
-  int16_t __s1_296 = __p1_296; \
-  int16x4_t __s2_296 = __p2_296; \
-  int16_t __ret_296; \
-  __ret_296 = vqaddh_s16(__s0_296, vqrdmulhh_s16(__s1_296, vget_lane_s16(__s2_296, __p3_296))); \
-  __ret_296; \
-})
-#else
-#define vqrdmlahh_lane_s16(__p0_297, __p1_297, __p2_297, __p3_297) __extension__ ({ \
-  int16_t __s0_297 = __p0_297; \
-  int16_t __s1_297 = __p1_297; \
-  int16x4_t __s2_297 = __p2_297; \
-  int16x4_t __rev2_297;  __rev2_297 = __builtin_shufflevector(__s2_297, __s2_297, 3, 2, 1, 0); \
-  int16_t __ret_297; \
-  __ret_297 = vqaddh_s16(__s0_297, vqrdmulhh_s16(__s1_297, __noswap_vget_lane_s16(__rev2_297, __p3_297))); \
-  __ret_297; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahs_laneq_s32(__p0_298, __p1_298, __p2_298, __p3_298) __extension__ ({ \
-  int32_t __s0_298 = __p0_298; \
-  int32_t __s1_298 = __p1_298; \
-  int32x4_t __s2_298 = __p2_298; \
-  int32_t __ret_298; \
-  __ret_298 = vqadds_s32(__s0_298, vqrdmulhs_s32(__s1_298, vgetq_lane_s32(__s2_298, __p3_298))); \
-  __ret_298; \
-})
-#else
-#define vqrdmlahs_laneq_s32(__p0_299, __p1_299, __p2_299, __p3_299) __extension__ ({ \
-  int32_t __s0_299 = __p0_299; \
-  int32_t __s1_299 = __p1_299; \
-  int32x4_t __s2_299 = __p2_299; \
-  int32x4_t __rev2_299;  __rev2_299 = __builtin_shufflevector(__s2_299, __s2_299, 3, 2, 1, 0); \
-  int32_t __ret_299; \
-  __ret_299 = vqadds_s32(__s0_299, vqrdmulhs_s32(__s1_299, __noswap_vgetq_lane_s32(__rev2_299, __p3_299))); \
-  __ret_299; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahh_laneq_s16(__p0_300, __p1_300, __p2_300, __p3_300) __extension__ ({ \
-  int16_t __s0_300 = __p0_300; \
-  int16_t __s1_300 = __p1_300; \
-  int16x8_t __s2_300 = __p2_300; \
-  int16_t __ret_300; \
-  __ret_300 = vqaddh_s16(__s0_300, vqrdmulhh_s16(__s1_300, vgetq_lane_s16(__s2_300, __p3_300))); \
-  __ret_300; \
-})
-#else
-#define vqrdmlahh_laneq_s16(__p0_301, __p1_301, __p2_301, __p3_301) __extension__ ({ \
-  int16_t __s0_301 = __p0_301; \
-  int16_t __s1_301 = __p1_301; \
-  int16x8_t __s2_301 = __p2_301; \
-  int16x8_t __rev2_301;  __rev2_301 = __builtin_shufflevector(__s2_301, __s2_301, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_301; \
-  __ret_301 = vqaddh_s16(__s0_301, vqrdmulhh_s16(__s1_301, __noswap_vgetq_lane_s16(__rev2_301, __p3_301))); \
-  __ret_301; \
-})
-#endif
-
-__ai int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
-  int32_t __ret;
-  __ret = vqsubs_s32(__p0, vqrdmulhs_s32(__p1, __p2));
-  return __ret;
-}
-__ai int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
-  int16_t __ret;
-  __ret = vqsubh_s16(__p0, vqrdmulhh_s16(__p1, __p2));
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshs_lane_s32(__p0_302, __p1_302, __p2_302, __p3_302) __extension__ ({ \
-  int32_t __s0_302 = __p0_302; \
-  int32_t __s1_302 = __p1_302; \
-  int32x2_t __s2_302 = __p2_302; \
-  int32_t __ret_302; \
-  __ret_302 = vqsubs_s32(__s0_302, vqrdmulhs_s32(__s1_302, vget_lane_s32(__s2_302, __p3_302))); \
-  __ret_302; \
-})
-#else
-#define vqrdmlshs_lane_s32(__p0_303, __p1_303, __p2_303, __p3_303) __extension__ ({ \
-  int32_t __s0_303 = __p0_303; \
-  int32_t __s1_303 = __p1_303; \
-  int32x2_t __s2_303 = __p2_303; \
-  int32x2_t __rev2_303;  __rev2_303 = __builtin_shufflevector(__s2_303, __s2_303, 1, 0); \
-  int32_t __ret_303; \
-  __ret_303 = vqsubs_s32(__s0_303, vqrdmulhs_s32(__s1_303, __noswap_vget_lane_s32(__rev2_303, __p3_303))); \
-  __ret_303; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshh_lane_s16(__p0_304, __p1_304, __p2_304, __p3_304) __extension__ ({ \
-  int16_t __s0_304 = __p0_304; \
-  int16_t __s1_304 = __p1_304; \
-  int16x4_t __s2_304 = __p2_304; \
-  int16_t __ret_304; \
-  __ret_304 = vqsubh_s16(__s0_304, vqrdmulhh_s16(__s1_304, vget_lane_s16(__s2_304, __p3_304))); \
-  __ret_304; \
-})
-#else
-#define vqrdmlshh_lane_s16(__p0_305, __p1_305, __p2_305, __p3_305) __extension__ ({ \
-  int16_t __s0_305 = __p0_305; \
-  int16_t __s1_305 = __p1_305; \
-  int16x4_t __s2_305 = __p2_305; \
-  int16x4_t __rev2_305;  __rev2_305 = __builtin_shufflevector(__s2_305, __s2_305, 3, 2, 1, 0); \
-  int16_t __ret_305; \
-  __ret_305 = vqsubh_s16(__s0_305, vqrdmulhh_s16(__s1_305, __noswap_vget_lane_s16(__rev2_305, __p3_305))); \
-  __ret_305; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshs_laneq_s32(__p0_306, __p1_306, __p2_306, __p3_306) __extension__ ({ \
-  int32_t __s0_306 = __p0_306; \
-  int32_t __s1_306 = __p1_306; \
-  int32x4_t __s2_306 = __p2_306; \
-  int32_t __ret_306; \
-  __ret_306 = vqsubs_s32(__s0_306, vqrdmulhs_s32(__s1_306, vgetq_lane_s32(__s2_306, __p3_306))); \
-  __ret_306; \
-})
-#else
-#define vqrdmlshs_laneq_s32(__p0_307, __p1_307, __p2_307, __p3_307) __extension__ ({ \
-  int32_t __s0_307 = __p0_307; \
-  int32_t __s1_307 = __p1_307; \
-  int32x4_t __s2_307 = __p2_307; \
-  int32x4_t __rev2_307;  __rev2_307 = __builtin_shufflevector(__s2_307, __s2_307, 3, 2, 1, 0); \
-  int32_t __ret_307; \
-  __ret_307 = vqsubs_s32(__s0_307, vqrdmulhs_s32(__s1_307, __noswap_vgetq_lane_s32(__rev2_307, __p3_307))); \
-  __ret_307; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshh_laneq_s16(__p0_308, __p1_308, __p2_308, __p3_308) __extension__ ({ \
-  int16_t __s0_308 = __p0_308; \
-  int16_t __s1_308 = __p1_308; \
-  int16x8_t __s2_308 = __p2_308; \
-  int16_t __ret_308; \
-  __ret_308 = vqsubh_s16(__s0_308, vqrdmulhh_s16(__s1_308, vgetq_lane_s16(__s2_308, __p3_308))); \
-  __ret_308; \
-})
-#else
-#define vqrdmlshh_laneq_s16(__p0_309, __p1_309, __p2_309, __p3_309) __extension__ ({ \
-  int16_t __s0_309 = __p0_309; \
-  int16_t __s1_309 = __p1_309; \
-  int16x8_t __s2_309 = __p2_309; \
-  int16x8_t __rev2_309;  __rev2_309 = __builtin_shufflevector(__s2_309, __s2_309, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_309; \
-  __ret_309 = vqsubh_s16(__s0_309, vqrdmulhh_s16(__s1_309, __noswap_vgetq_lane_s16(__rev2_309, __p3_309))); \
-  __ret_309; \
-})
-#endif
-
-#endif
-#if defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = vabdl_u8(vget_high_u8(__p0), vget_high_u8(__p1));
-  return __ret;
-}
-#else
-__ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vabdl_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = vabdl_u32(vget_high_u32(__p0), vget_high_u32(__p1));
-  return __ret;
-}
-#else
-__ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vabdl_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = vabdl_u16(vget_high_u16(__p0), vget_high_u16(__p1));
-  return __ret;
-}
-#else
-__ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vabdl_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = vabdl_s8(vget_high_s8(__p0), vget_high_s8(__p1));
-  return __ret;
-}
-#else
-__ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vabdl_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = vabdl_s32(vget_high_s32(__p0), vget_high_s32(__p1));
-  return __ret;
-}
-#else
-__ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vabdl_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = vabdl_s16(vget_high_s16(__p0), vget_high_s16(__p1));
-  return __ret;
-}
-#else
-__ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vabdl_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = vmovl_high_u8(__p0) + vmovl_high_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmovl_high_u8(__rev0) + __noswap_vmovl_high_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmovl_high_u32(__p0) + vmovl_high_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmovl_high_u32(__rev0) + __noswap_vmovl_high_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmovl_high_u16(__p0) + vmovl_high_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmovl_high_u16(__rev0) + __noswap_vmovl_high_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = vmovl_high_s8(__p0) + vmovl_high_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmovl_high_s8(__rev0) + __noswap_vmovl_high_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = vmovl_high_s32(__p0) + vmovl_high_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmovl_high_s32(__rev0) + __noswap_vmovl_high_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = vmovl_high_s16(__p0) + vmovl_high_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmovl_high_s16(__rev0) + __noswap_vmovl_high_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 + vmovl_high_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __noswap_vmovl_high_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 + vmovl_high_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 + __noswap_vmovl_high_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 + vmovl_high_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __noswap_vmovl_high_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 + vmovl_high_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __noswap_vmovl_high_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 + vmovl_high_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 + __noswap_vmovl_high_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 + vmovl_high_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __noswap_vmovl_high_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_p64(__p0_310, __p1_310, __p2_310, __p3_310) __extension__ ({ \
-  poly64x2_t __s0_310 = __p0_310; \
-  poly64x1_t __s2_310 = __p2_310; \
-  poly64x2_t __ret_310; \
-  __ret_310 = vsetq_lane_p64(vget_lane_p64(__s2_310, __p3_310), __s0_310, __p1_310); \
-  __ret_310; \
-})
-#else
-#define vcopyq_lane_p64(__p0_311, __p1_311, __p2_311, __p3_311) __extension__ ({ \
-  poly64x2_t __s0_311 = __p0_311; \
-  poly64x1_t __s2_311 = __p2_311; \
-  poly64x2_t __rev0_311;  __rev0_311 = __builtin_shufflevector(__s0_311, __s0_311, 1, 0); \
-  poly64x2_t __ret_311; \
-  __ret_311 = __noswap_vsetq_lane_p64(vget_lane_p64(__s2_311, __p3_311), __rev0_311, __p1_311); \
-  __ret_311 = __builtin_shufflevector(__ret_311, __ret_311, 1, 0); \
-  __ret_311; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_f64(__p0_312, __p1_312, __p2_312, __p3_312) __extension__ ({ \
-  float64x2_t __s0_312 = __p0_312; \
-  float64x1_t __s2_312 = __p2_312; \
-  float64x2_t __ret_312; \
-  __ret_312 = vsetq_lane_f64(vget_lane_f64(__s2_312, __p3_312), __s0_312, __p1_312); \
-  __ret_312; \
-})
-#else
-#define vcopyq_lane_f64(__p0_313, __p1_313, __p2_313, __p3_313) __extension__ ({ \
-  float64x2_t __s0_313 = __p0_313; \
-  float64x1_t __s2_313 = __p2_313; \
-  float64x2_t __rev0_313;  __rev0_313 = __builtin_shufflevector(__s0_313, __s0_313, 1, 0); \
-  float64x2_t __ret_313; \
-  __ret_313 = __noswap_vsetq_lane_f64(vget_lane_f64(__s2_313, __p3_313), __rev0_313, __p1_313); \
-  __ret_313 = __builtin_shufflevector(__ret_313, __ret_313, 1, 0); \
-  __ret_313; \
-})
-#endif
-
-#define vcopy_lane_p64(__p0_314, __p1_314, __p2_314, __p3_314) __extension__ ({ \
-  poly64x1_t __s0_314 = __p0_314; \
-  poly64x1_t __s2_314 = __p2_314; \
-  poly64x1_t __ret_314; \
-  __ret_314 = vset_lane_p64(vget_lane_p64(__s2_314, __p3_314), __s0_314, __p1_314); \
-  __ret_314; \
-})
-#define vcopy_lane_f64(__p0_315, __p1_315, __p2_315, __p3_315) __extension__ ({ \
-  float64x1_t __s0_315 = __p0_315; \
-  float64x1_t __s2_315 = __p2_315; \
-  float64x1_t __ret_315; \
-  __ret_315 = vset_lane_f64(vget_lane_f64(__s2_315, __p3_315), __s0_315, __p1_315); \
-  __ret_315; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_p64(__p0_316, __p1_316, __p2_316, __p3_316) __extension__ ({ \
-  poly64x2_t __s0_316 = __p0_316; \
-  poly64x2_t __s2_316 = __p2_316; \
-  poly64x2_t __ret_316; \
-  __ret_316 = vsetq_lane_p64(vgetq_lane_p64(__s2_316, __p3_316), __s0_316, __p1_316); \
-  __ret_316; \
-})
-#else
-#define vcopyq_laneq_p64(__p0_317, __p1_317, __p2_317, __p3_317) __extension__ ({ \
-  poly64x2_t __s0_317 = __p0_317; \
-  poly64x2_t __s2_317 = __p2_317; \
-  poly64x2_t __rev0_317;  __rev0_317 = __builtin_shufflevector(__s0_317, __s0_317, 1, 0); \
-  poly64x2_t __rev2_317;  __rev2_317 = __builtin_shufflevector(__s2_317, __s2_317, 1, 0); \
-  poly64x2_t __ret_317; \
-  __ret_317 = __noswap_vsetq_lane_p64(__noswap_vgetq_lane_p64(__rev2_317, __p3_317), __rev0_317, __p1_317); \
-  __ret_317 = __builtin_shufflevector(__ret_317, __ret_317, 1, 0); \
-  __ret_317; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_f64(__p0_318, __p1_318, __p2_318, __p3_318) __extension__ ({ \
-  float64x2_t __s0_318 = __p0_318; \
-  float64x2_t __s2_318 = __p2_318; \
-  float64x2_t __ret_318; \
-  __ret_318 = vsetq_lane_f64(vgetq_lane_f64(__s2_318, __p3_318), __s0_318, __p1_318); \
-  __ret_318; \
-})
-#else
-#define vcopyq_laneq_f64(__p0_319, __p1_319, __p2_319, __p3_319) __extension__ ({ \
-  float64x2_t __s0_319 = __p0_319; \
-  float64x2_t __s2_319 = __p2_319; \
-  float64x2_t __rev0_319;  __rev0_319 = __builtin_shufflevector(__s0_319, __s0_319, 1, 0); \
-  float64x2_t __rev2_319;  __rev2_319 = __builtin_shufflevector(__s2_319, __s2_319, 1, 0); \
-  float64x2_t __ret_319; \
-  __ret_319 = __noswap_vsetq_lane_f64(__noswap_vgetq_lane_f64(__rev2_319, __p3_319), __rev0_319, __p1_319); \
-  __ret_319 = __builtin_shufflevector(__ret_319, __ret_319, 1, 0); \
-  __ret_319; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_p64(__p0_320, __p1_320, __p2_320, __p3_320) __extension__ ({ \
-  poly64x1_t __s0_320 = __p0_320; \
-  poly64x2_t __s2_320 = __p2_320; \
-  poly64x1_t __ret_320; \
-  __ret_320 = vset_lane_p64(vgetq_lane_p64(__s2_320, __p3_320), __s0_320, __p1_320); \
-  __ret_320; \
-})
-#else
-#define vcopy_laneq_p64(__p0_321, __p1_321, __p2_321, __p3_321) __extension__ ({ \
-  poly64x1_t __s0_321 = __p0_321; \
-  poly64x2_t __s2_321 = __p2_321; \
-  poly64x2_t __rev2_321;  __rev2_321 = __builtin_shufflevector(__s2_321, __s2_321, 1, 0); \
-  poly64x1_t __ret_321; \
-  __ret_321 = vset_lane_p64(__noswap_vgetq_lane_p64(__rev2_321, __p3_321), __s0_321, __p1_321); \
-  __ret_321; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_f64(__p0_322, __p1_322, __p2_322, __p3_322) __extension__ ({ \
-  float64x1_t __s0_322 = __p0_322; \
-  float64x2_t __s2_322 = __p2_322; \
-  float64x1_t __ret_322; \
-  __ret_322 = vset_lane_f64(vgetq_lane_f64(__s2_322, __p3_322), __s0_322, __p1_322); \
-  __ret_322; \
-})
-#else
-#define vcopy_laneq_f64(__p0_323, __p1_323, __p2_323, __p3_323) __extension__ ({ \
-  float64x1_t __s0_323 = __p0_323; \
-  float64x2_t __s2_323 = __p2_323; \
-  float64x2_t __rev2_323;  __rev2_323 = __builtin_shufflevector(__s2_323, __s2_323, 1, 0); \
-  float64x1_t __ret_323; \
-  __ret_323 = vset_lane_f64(__noswap_vgetq_lane_f64(__rev2_323, __p3_323), __s0_323, __p1_323); \
-  __ret_323; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint16x8_t __ret;
-  __ret = vmlal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmlal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint64x2_t __ret;
-  __ret = vmlal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2));
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmlal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint32x4_t __ret;
-  __ret = vmlal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmlal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int16x8_t __ret;
-  __ret = vmlal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmlal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __ret;
-  __ret = vmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
-  return __ret;
-}
-#else
-__ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __ret;
-  __ret = vmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint64x2_t __ret;
-  __ret = vmlal_n_u32(__p0, vget_high_u32(__p1), __p2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmlal_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint32x4_t __ret;
-  __ret = vmlal_n_u16(__p0, vget_high_u16(__p1), __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmlal_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = vmlal_n_s32(__p0, vget_high_s32(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = vmlal_n_s16(__p0, vget_high_s16(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint16x8_t __ret;
-  __ret = vmlsl_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmlsl_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint64x2_t __ret;
-  __ret = vmlsl_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2));
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmlsl_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint32x4_t __ret;
-  __ret = vmlsl_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmlsl_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int16x8_t __ret;
-  __ret = vmlsl_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmlsl_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __ret;
-  __ret = vmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
-  return __ret;
-}
-#else
-__ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __ret;
-  __ret = vmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint64x2_t __ret;
-  __ret = vmlsl_n_u32(__p0, vget_high_u32(__p1), __p2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmlsl_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint32x4_t __ret;
-  __ret = vmlsl_n_u16(__p0, vget_high_u16(__p1), __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmlsl_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = vmlsl_n_s32(__p0, vget_high_s32(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = vmlsl_n_s16(__p0, vget_high_s16(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#define vmulx_lane_f64(__p0_324, __p1_324, __p2_324) __extension__ ({ \
-  float64x1_t __s0_324 = __p0_324; \
-  float64x1_t __s1_324 = __p1_324; \
-  float64x1_t __ret_324; \
-  float64_t __x_324 = vget_lane_f64(__s0_324, 0); \
-  float64_t __y_324 = vget_lane_f64(__s1_324, __p2_324); \
-  float64_t __z_324 = vmulxd_f64(__x_324, __y_324); \
-  __ret_324 = vset_lane_f64(__z_324, __s0_324, __p2_324); \
-  __ret_324; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_laneq_f64(__p0_325, __p1_325, __p2_325) __extension__ ({ \
-  float64x1_t __s0_325 = __p0_325; \
-  float64x2_t __s1_325 = __p1_325; \
-  float64x1_t __ret_325; \
-  float64_t __x_325 = vget_lane_f64(__s0_325, 0); \
-  float64_t __y_325 = vgetq_lane_f64(__s1_325, __p2_325); \
-  float64_t __z_325 = vmulxd_f64(__x_325, __y_325); \
-  __ret_325 = vset_lane_f64(__z_325, __s0_325, 0); \
-  __ret_325; \
-})
-#else
-#define vmulx_laneq_f64(__p0_326, __p1_326, __p2_326) __extension__ ({ \
-  float64x1_t __s0_326 = __p0_326; \
-  float64x2_t __s1_326 = __p1_326; \
-  float64x2_t __rev1_326;  __rev1_326 = __builtin_shufflevector(__s1_326, __s1_326, 1, 0); \
-  float64x1_t __ret_326; \
-  float64_t __x_326 = vget_lane_f64(__s0_326, 0); \
-  float64_t __y_326 = __noswap_vgetq_lane_f64(__rev1_326, __p2_326); \
-  float64_t __z_326 = vmulxd_f64(__x_326, __y_326); \
-  __ret_326 = vset_lane_f64(__z_326, __s0_326, 0); \
-  __ret_326; \
-})
-#endif
-
-#endif
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 + vabdl_u8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __noswap_vabdl_u8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 + __noswap_vabdl_u8(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 + vabdl_u32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 + __noswap_vabdl_u32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 + __noswap_vabdl_u32(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + vabdl_u16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __noswap_vabdl_u16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + __noswap_vabdl_u16(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 + vabdl_s8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __noswap_vabdl_s8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 + __noswap_vabdl_s8(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 + vabdl_s32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 + __noswap_vabdl_s32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 + __noswap_vabdl_s32(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + vabdl_s16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __noswap_vabdl_s16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + __noswap_vabdl_s16(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#if defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint16x8_t __ret;
-  __ret = vabal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vabal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint64x2_t __ret;
-  __ret = vabal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2));
-  return __ret;
-}
-#else
-__ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vabal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint32x4_t __ret;
-  __ret = vabal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vabal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int16x8_t __ret;
-  __ret = vabal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vabal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __ret;
-  __ret = vabal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
-  return __ret;
-}
-#else
-__ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vabal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __ret;
-  __ret = vabal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vabal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-
-#undef __ai
-
-#endif /* __ARM_NEON_H */
diff --git a/linux-x86/lib64/clang/10.0.1/include/avx512fintrin.h b/linux-x86/lib64/clang/10.0.1/include/avx512fintrin.h
deleted file mode 100644
index 4e341a1..0000000
--- a/linux-x86/lib64/clang/10.0.1/include/avx512fintrin.h
+++ /dev/null
@@ -1,9683 +0,0 @@
-/*===---- avx512fintrin.h - AVX512F intrinsics -----------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-#ifndef __IMMINTRIN_H
-#error "Never use <avx512fintrin.h> directly; include <immintrin.h> instead."
-#endif
-
-#ifndef __AVX512FINTRIN_H
-#define __AVX512FINTRIN_H
-
-typedef char __v64qi __attribute__((__vector_size__(64)));
-typedef short __v32hi __attribute__((__vector_size__(64)));
-typedef double __v8df __attribute__((__vector_size__(64)));
-typedef float __v16sf __attribute__((__vector_size__(64)));
-typedef long long __v8di __attribute__((__vector_size__(64)));
-typedef int __v16si __attribute__((__vector_size__(64)));
-
-/* Unsigned types */
-typedef unsigned char __v64qu __attribute__((__vector_size__(64)));
-typedef unsigned short __v32hu __attribute__((__vector_size__(64)));
-typedef unsigned long long __v8du __attribute__((__vector_size__(64)));
-typedef unsigned int __v16su __attribute__((__vector_size__(64)));
-
-typedef float __m512 __attribute__((__vector_size__(64), __aligned__(64)));
-typedef double __m512d __attribute__((__vector_size__(64), __aligned__(64)));
-typedef long long __m512i __attribute__((__vector_size__(64), __aligned__(64)));
-
-typedef float __m512_u __attribute__((__vector_size__(64), __aligned__(1)));
-typedef double __m512d_u __attribute__((__vector_size__(64), __aligned__(1)));
-typedef long long __m512i_u __attribute__((__vector_size__(64), __aligned__(1)));
-
-typedef unsigned char __mmask8;
-typedef unsigned short __mmask16;
-
-/* Rounding mode macros.  */
-#define _MM_FROUND_TO_NEAREST_INT   0x00
-#define _MM_FROUND_TO_NEG_INF       0x01
-#define _MM_FROUND_TO_POS_INF       0x02
-#define _MM_FROUND_TO_ZERO          0x03
-#define _MM_FROUND_CUR_DIRECTION    0x04
-
-/* Constants for integer comparison predicates */
-typedef enum {
-    _MM_CMPINT_EQ,      /* Equal */
-    _MM_CMPINT_LT,      /* Less than */
-    _MM_CMPINT_LE,      /* Less than or Equal */
-    _MM_CMPINT_UNUSED,
-    _MM_CMPINT_NE,      /* Not Equal */
-    _MM_CMPINT_NLT,     /* Not Less than */
-#define _MM_CMPINT_GE   _MM_CMPINT_NLT  /* Greater than or Equal */
-    _MM_CMPINT_NLE      /* Not Less than or Equal */
-#define _MM_CMPINT_GT   _MM_CMPINT_NLE  /* Greater than */
-} _MM_CMPINT_ENUM;
-
-typedef enum
-{
-  _MM_PERM_AAAA = 0x00, _MM_PERM_AAAB = 0x01, _MM_PERM_AAAC = 0x02,
-  _MM_PERM_AAAD = 0x03, _MM_PERM_AABA = 0x04, _MM_PERM_AABB = 0x05,
-  _MM_PERM_AABC = 0x06, _MM_PERM_AABD = 0x07, _MM_PERM_AACA = 0x08,
-  _MM_PERM_AACB = 0x09, _MM_PERM_AACC = 0x0A, _MM_PERM_AACD = 0x0B,
-  _MM_PERM_AADA = 0x0C, _MM_PERM_AADB = 0x0D, _MM_PERM_AADC = 0x0E,
-  _MM_PERM_AADD = 0x0F, _MM_PERM_ABAA = 0x10, _MM_PERM_ABAB = 0x11,
-  _MM_PERM_ABAC = 0x12, _MM_PERM_ABAD = 0x13, _MM_PERM_ABBA = 0x14,
-  _MM_PERM_ABBB = 0x15, _MM_PERM_ABBC = 0x16, _MM_PERM_ABBD = 0x17,
-  _MM_PERM_ABCA = 0x18, _MM_PERM_ABCB = 0x19, _MM_PERM_ABCC = 0x1A,
-  _MM_PERM_ABCD = 0x1B, _MM_PERM_ABDA = 0x1C, _MM_PERM_ABDB = 0x1D,
-  _MM_PERM_ABDC = 0x1E, _MM_PERM_ABDD = 0x1F, _MM_PERM_ACAA = 0x20,
-  _MM_PERM_ACAB = 0x21, _MM_PERM_ACAC = 0x22, _MM_PERM_ACAD = 0x23,
-  _MM_PERM_ACBA = 0x24, _MM_PERM_ACBB = 0x25, _MM_PERM_ACBC = 0x26,
-  _MM_PERM_ACBD = 0x27, _MM_PERM_ACCA = 0x28, _MM_PERM_ACCB = 0x29,
-  _MM_PERM_ACCC = 0x2A, _MM_PERM_ACCD = 0x2B, _MM_PERM_ACDA = 0x2C,
-  _MM_PERM_ACDB = 0x2D, _MM_PERM_ACDC = 0x2E, _MM_PERM_ACDD = 0x2F,
-  _MM_PERM_ADAA = 0x30, _MM_PERM_ADAB = 0x31, _MM_PERM_ADAC = 0x32,
-  _MM_PERM_ADAD = 0x33, _MM_PERM_ADBA = 0x34, _MM_PERM_ADBB = 0x35,
-  _MM_PERM_ADBC = 0x36, _MM_PERM_ADBD = 0x37, _MM_PERM_ADCA = 0x38,
-  _MM_PERM_ADCB = 0x39, _MM_PERM_ADCC = 0x3A, _MM_PERM_ADCD = 0x3B,
-  _MM_PERM_ADDA = 0x3C, _MM_PERM_ADDB = 0x3D, _MM_PERM_ADDC = 0x3E,
-  _MM_PERM_ADDD = 0x3F, _MM_PERM_BAAA = 0x40, _MM_PERM_BAAB = 0x41,
-  _MM_PERM_BAAC = 0x42, _MM_PERM_BAAD = 0x43, _MM_PERM_BABA = 0x44,
-  _MM_PERM_BABB = 0x45, _MM_PERM_BABC = 0x46, _MM_PERM_BABD = 0x47,
-  _MM_PERM_BACA = 0x48, _MM_PERM_BACB = 0x49, _MM_PERM_BACC = 0x4A,
-  _MM_PERM_BACD = 0x4B, _MM_PERM_BADA = 0x4C, _MM_PERM_BADB = 0x4D,
-  _MM_PERM_BADC = 0x4E, _MM_PERM_BADD = 0x4F, _MM_PERM_BBAA = 0x50,
-  _MM_PERM_BBAB = 0x51, _MM_PERM_BBAC = 0x52, _MM_PERM_BBAD = 0x53,
-  _MM_PERM_BBBA = 0x54, _MM_PERM_BBBB = 0x55, _MM_PERM_BBBC = 0x56,
-  _MM_PERM_BBBD = 0x57, _MM_PERM_BBCA = 0x58, _MM_PERM_BBCB = 0x59,
-  _MM_PERM_BBCC = 0x5A, _MM_PERM_BBCD = 0x5B, _MM_PERM_BBDA = 0x5C,
-  _MM_PERM_BBDB = 0x5D, _MM_PERM_BBDC = 0x5E, _MM_PERM_BBDD = 0x5F,
-  _MM_PERM_BCAA = 0x60, _MM_PERM_BCAB = 0x61, _MM_PERM_BCAC = 0x62,
-  _MM_PERM_BCAD = 0x63, _MM_PERM_BCBA = 0x64, _MM_PERM_BCBB = 0x65,
-  _MM_PERM_BCBC = 0x66, _MM_PERM_BCBD = 0x67, _MM_PERM_BCCA = 0x68,
-  _MM_PERM_BCCB = 0x69, _MM_PERM_BCCC = 0x6A, _MM_PERM_BCCD = 0x6B,
-  _MM_PERM_BCDA = 0x6C, _MM_PERM_BCDB = 0x6D, _MM_PERM_BCDC = 0x6E,
-  _MM_PERM_BCDD = 0x6F, _MM_PERM_BDAA = 0x70, _MM_PERM_BDAB = 0x71,
-  _MM_PERM_BDAC = 0x72, _MM_PERM_BDAD = 0x73, _MM_PERM_BDBA = 0x74,
-  _MM_PERM_BDBB = 0x75, _MM_PERM_BDBC = 0x76, _MM_PERM_BDBD = 0x77,
-  _MM_PERM_BDCA = 0x78, _MM_PERM_BDCB = 0x79, _MM_PERM_BDCC = 0x7A,
-  _MM_PERM_BDCD = 0x7B, _MM_PERM_BDDA = 0x7C, _MM_PERM_BDDB = 0x7D,
-  _MM_PERM_BDDC = 0x7E, _MM_PERM_BDDD = 0x7F, _MM_PERM_CAAA = 0x80,
-  _MM_PERM_CAAB = 0x81, _MM_PERM_CAAC = 0x82, _MM_PERM_CAAD = 0x83,
-  _MM_PERM_CABA = 0x84, _MM_PERM_CABB = 0x85, _MM_PERM_CABC = 0x86,
-  _MM_PERM_CABD = 0x87, _MM_PERM_CACA = 0x88, _MM_PERM_CACB = 0x89,
-  _MM_PERM_CACC = 0x8A, _MM_PERM_CACD = 0x8B, _MM_PERM_CADA = 0x8C,
-  _MM_PERM_CADB = 0x8D, _MM_PERM_CADC = 0x8E, _MM_PERM_CADD = 0x8F,
-  _MM_PERM_CBAA = 0x90, _MM_PERM_CBAB = 0x91, _MM_PERM_CBAC = 0x92,
-  _MM_PERM_CBAD = 0x93, _MM_PERM_CBBA = 0x94, _MM_PERM_CBBB = 0x95,
-  _MM_PERM_CBBC = 0x96, _MM_PERM_CBBD = 0x97, _MM_PERM_CBCA = 0x98,
-  _MM_PERM_CBCB = 0x99, _MM_PERM_CBCC = 0x9A, _MM_PERM_CBCD = 0x9B,
-  _MM_PERM_CBDA = 0x9C, _MM_PERM_CBDB = 0x9D, _MM_PERM_CBDC = 0x9E,
-  _MM_PERM_CBDD = 0x9F, _MM_PERM_CCAA = 0xA0, _MM_PERM_CCAB = 0xA1,
-  _MM_PERM_CCAC = 0xA2, _MM_PERM_CCAD = 0xA3, _MM_PERM_CCBA = 0xA4,
-  _MM_PERM_CCBB = 0xA5, _MM_PERM_CCBC = 0xA6, _MM_PERM_CCBD = 0xA7,
-  _MM_PERM_CCCA = 0xA8, _MM_PERM_CCCB = 0xA9, _MM_PERM_CCCC = 0xAA,
-  _MM_PERM_CCCD = 0xAB, _MM_PERM_CCDA = 0xAC, _MM_PERM_CCDB = 0xAD,
-  _MM_PERM_CCDC = 0xAE, _MM_PERM_CCDD = 0xAF, _MM_PERM_CDAA = 0xB0,
-  _MM_PERM_CDAB = 0xB1, _MM_PERM_CDAC = 0xB2, _MM_PERM_CDAD = 0xB3,
-  _MM_PERM_CDBA = 0xB4, _MM_PERM_CDBB = 0xB5, _MM_PERM_CDBC = 0xB6,
-  _MM_PERM_CDBD = 0xB7, _MM_PERM_CDCA = 0xB8, _MM_PERM_CDCB = 0xB9,
-  _MM_PERM_CDCC = 0xBA, _MM_PERM_CDCD = 0xBB, _MM_PERM_CDDA = 0xBC,
-  _MM_PERM_CDDB = 0xBD, _MM_PERM_CDDC = 0xBE, _MM_PERM_CDDD = 0xBF,
-  _MM_PERM_DAAA = 0xC0, _MM_PERM_DAAB = 0xC1, _MM_PERM_DAAC = 0xC2,
-  _MM_PERM_DAAD = 0xC3, _MM_PERM_DABA = 0xC4, _MM_PERM_DABB = 0xC5,
-  _MM_PERM_DABC = 0xC6, _MM_PERM_DABD = 0xC7, _MM_PERM_DACA = 0xC8,
-  _MM_PERM_DACB = 0xC9, _MM_PERM_DACC = 0xCA, _MM_PERM_DACD = 0xCB,
-  _MM_PERM_DADA = 0xCC, _MM_PERM_DADB = 0xCD, _MM_PERM_DADC = 0xCE,
-  _MM_PERM_DADD = 0xCF, _MM_PERM_DBAA = 0xD0, _MM_PERM_DBAB = 0xD1,
-  _MM_PERM_DBAC = 0xD2, _MM_PERM_DBAD = 0xD3, _MM_PERM_DBBA = 0xD4,
-  _MM_PERM_DBBB = 0xD5, _MM_PERM_DBBC = 0xD6, _MM_PERM_DBBD = 0xD7,
-  _MM_PERM_DBCA = 0xD8, _MM_PERM_DBCB = 0xD9, _MM_PERM_DBCC = 0xDA,
-  _MM_PERM_DBCD = 0xDB, _MM_PERM_DBDA = 0xDC, _MM_PERM_DBDB = 0xDD,
-  _MM_PERM_DBDC = 0xDE, _MM_PERM_DBDD = 0xDF, _MM_PERM_DCAA = 0xE0,
-  _MM_PERM_DCAB = 0xE1, _MM_PERM_DCAC = 0xE2, _MM_PERM_DCAD = 0xE3,
-  _MM_PERM_DCBA = 0xE4, _MM_PERM_DCBB = 0xE5, _MM_PERM_DCBC = 0xE6,
-  _MM_PERM_DCBD = 0xE7, _MM_PERM_DCCA = 0xE8, _MM_PERM_DCCB = 0xE9,
-  _MM_PERM_DCCC = 0xEA, _MM_PERM_DCCD = 0xEB, _MM_PERM_DCDA = 0xEC,
-  _MM_PERM_DCDB = 0xED, _MM_PERM_DCDC = 0xEE, _MM_PERM_DCDD = 0xEF,
-  _MM_PERM_DDAA = 0xF0, _MM_PERM_DDAB = 0xF1, _MM_PERM_DDAC = 0xF2,
-  _MM_PERM_DDAD = 0xF3, _MM_PERM_DDBA = 0xF4, _MM_PERM_DDBB = 0xF5,
-  _MM_PERM_DDBC = 0xF6, _MM_PERM_DDBD = 0xF7, _MM_PERM_DDCA = 0xF8,
-  _MM_PERM_DDCB = 0xF9, _MM_PERM_DDCC = 0xFA, _MM_PERM_DDCD = 0xFB,
-  _MM_PERM_DDDA = 0xFC, _MM_PERM_DDDB = 0xFD, _MM_PERM_DDDC = 0xFE,
-  _MM_PERM_DDDD = 0xFF
-} _MM_PERM_ENUM;
-
-typedef enum
-{
-  _MM_MANT_NORM_1_2,    /* interval [1, 2)      */
-  _MM_MANT_NORM_p5_2,   /* interval [0.5, 2)    */
-  _MM_MANT_NORM_p5_1,   /* interval [0.5, 1)    */
-  _MM_MANT_NORM_p75_1p5   /* interval [0.75, 1.5) */
-} _MM_MANTISSA_NORM_ENUM;
-
-typedef enum
-{
-  _MM_MANT_SIGN_src,    /* sign = sign(SRC)     */
-  _MM_MANT_SIGN_zero,   /* sign = 0             */
-  _MM_MANT_SIGN_nan   /* DEST = NaN if sign(SRC) = 1 */
-} _MM_MANTISSA_SIGN_ENUM;
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS512 __attribute__((__always_inline__, __nodebug__, __target__("avx512f"), __min_vector_width__(512)))
-#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512f"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512f")))
-
-/* Create vectors with repeated elements */
-
-static  __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_setzero_si512(void)
-{
-  return __extension__ (__m512i)(__v8di){ 0, 0, 0, 0, 0, 0, 0, 0 };
-}
-
-#define _mm512_setzero_epi32 _mm512_setzero_si512
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_undefined_pd(void)
-{
-  return (__m512d)__builtin_ia32_undef512();
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_undefined(void)
-{
-  return (__m512)__builtin_ia32_undef512();
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_undefined_ps(void)
-{
-  return (__m512)__builtin_ia32_undef512();
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_undefined_epi32(void)
-{
-  return (__m512i)__builtin_ia32_undef512();
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_broadcastd_epi32 (__m128i __A)
-{
-  return (__m512i)__builtin_shufflevector((__v4si) __A, (__v4si) __A,
-                                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcastd_epi32 (__m512i __O, __mmask16 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__M,
-                                             (__v16si) _mm512_broadcastd_epi32(__A),
-                                             (__v16si) __O);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcastd_epi32 (__mmask16 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__M,
-                                             (__v16si) _mm512_broadcastd_epi32(__A),
-                                             (__v16si) _mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_broadcastq_epi64 (__m128i __A)
-{
-  return (__m512i)__builtin_shufflevector((__v2di) __A, (__v2di) __A,
-                                          0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcastq_epi64 (__m512i __O, __mmask8 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__M,
-                                             (__v8di) _mm512_broadcastq_epi64(__A),
-                                             (__v8di) __O);
-
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__M,
-                                             (__v8di) _mm512_broadcastq_epi64(__A),
-                                             (__v8di) _mm512_setzero_si512());
-}
-
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_setzero_ps(void)
-{
-  return __extension__ (__m512){ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
-                                 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
-}
-
-#define _mm512_setzero _mm512_setzero_ps
-
-static  __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_setzero_pd(void)
-{
-  return __extension__ (__m512d){ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_set1_ps(float __w)
-{
-  return __extension__ (__m512){ __w, __w, __w, __w, __w, __w, __w, __w,
-                                 __w, __w, __w, __w, __w, __w, __w, __w  };
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_set1_pd(double __w)
-{
-  return __extension__ (__m512d){ __w, __w, __w, __w, __w, __w, __w, __w };
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set1_epi8(char __w)
-{
-  return __extension__ (__m512i)(__v64qi){
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w  };
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set1_epi16(short __w)
-{
-  return __extension__ (__m512i)(__v32hi){
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w,
-    __w, __w, __w, __w, __w, __w, __w, __w };
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set1_epi32(int __s)
-{
-  return __extension__ (__m512i)(__v16si){
-    __s, __s, __s, __s, __s, __s, __s, __s,
-    __s, __s, __s, __s, __s, __s, __s, __s };
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_set1_epi32(__mmask16 __M, int __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__M,
-                                             (__v16si)_mm512_set1_epi32(__A),
-                                             (__v16si)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set1_epi64(long long __d)
-{
-  return __extension__(__m512i)(__v8di){ __d, __d, __d, __d, __d, __d, __d, __d };
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_set1_epi64(__mmask8 __M, long long __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__M,
-                                             (__v8di)_mm512_set1_epi64(__A),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_broadcastss_ps(__m128 __A)
-{
-  return (__m512)__builtin_shufflevector((__v4sf) __A, (__v4sf) __A,
-                                         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set4_epi32 (int __A, int __B, int __C, int __D)
-{
-  return __extension__ (__m512i)(__v16si)
-   { __D, __C, __B, __A, __D, __C, __B, __A,
-     __D, __C, __B, __A, __D, __C, __B, __A };
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set4_epi64 (long long __A, long long __B, long long __C,
-       long long __D)
-{
-  return __extension__ (__m512i) (__v8di)
-   { __D, __C, __B, __A, __D, __C, __B, __A };
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_set4_pd (double __A, double __B, double __C, double __D)
-{
-  return __extension__ (__m512d)
-   { __D, __C, __B, __A, __D, __C, __B, __A };
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_set4_ps (float __A, float __B, float __C, float __D)
-{
-  return __extension__ (__m512)
-   { __D, __C, __B, __A, __D, __C, __B, __A,
-     __D, __C, __B, __A, __D, __C, __B, __A };
-}
-
-#define _mm512_setr4_epi32(e0,e1,e2,e3)               \
-  _mm512_set4_epi32((e3),(e2),(e1),(e0))
-
-#define _mm512_setr4_epi64(e0,e1,e2,e3)               \
-  _mm512_set4_epi64((e3),(e2),(e1),(e0))
-
-#define _mm512_setr4_pd(e0,e1,e2,e3)                \
-  _mm512_set4_pd((e3),(e2),(e1),(e0))
-
-#define _mm512_setr4_ps(e0,e1,e2,e3)                \
-  _mm512_set4_ps((e3),(e2),(e1),(e0))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_broadcastsd_pd(__m128d __A)
-{
-  return (__m512d)__builtin_shufflevector((__v2df) __A, (__v2df) __A,
-                                          0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-/* Cast between vector types */
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_castpd256_pd512(__m256d __a)
-{
-  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, -1, -1, -1, -1);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_castps256_ps512(__m256 __a)
-{
-  return __builtin_shufflevector(__a, __a, 0,  1,  2,  3,  4,  5,  6,  7,
-                                          -1, -1, -1, -1, -1, -1, -1, -1);
-}
-
-static __inline __m128d __DEFAULT_FN_ATTRS512
-_mm512_castpd512_pd128(__m512d __a)
-{
-  return __builtin_shufflevector(__a, __a, 0, 1);
-}
-
-static __inline __m256d __DEFAULT_FN_ATTRS512
-_mm512_castpd512_pd256 (__m512d __A)
-{
-  return __builtin_shufflevector(__A, __A, 0, 1, 2, 3);
-}
-
-static __inline __m128 __DEFAULT_FN_ATTRS512
-_mm512_castps512_ps128(__m512 __a)
-{
-  return __builtin_shufflevector(__a, __a, 0, 1, 2, 3);
-}
-
-static __inline __m256 __DEFAULT_FN_ATTRS512
-_mm512_castps512_ps256 (__m512 __A)
-{
-  return __builtin_shufflevector(__A, __A, 0, 1, 2, 3, 4, 5, 6, 7);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_castpd_ps (__m512d __A)
-{
-  return (__m512) (__A);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_castpd_si512 (__m512d __A)
-{
-  return (__m512i) (__A);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_castpd128_pd512 (__m128d __A)
-{
-  return __builtin_shufflevector( __A, __A, 0, 1, -1, -1, -1, -1, -1, -1);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_castps_pd (__m512 __A)
-{
-  return (__m512d) (__A);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_castps_si512 (__m512 __A)
-{
-  return (__m512i) (__A);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_castps128_ps512 (__m128 __A)
-{
-    return  __builtin_shufflevector( __A, __A, 0, 1, 2, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_castsi128_si512 (__m128i __A)
-{
-   return  __builtin_shufflevector( __A, __A, 0, 1, -1, -1, -1, -1, -1, -1);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_castsi256_si512 (__m256i __A)
-{
-   return  __builtin_shufflevector( __A, __A, 0, 1, 2, 3, -1, -1, -1, -1);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_castsi512_ps (__m512i __A)
-{
-  return (__m512) (__A);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_castsi512_pd (__m512i __A)
-{
-  return (__m512d) (__A);
-}
-
-static __inline __m128i __DEFAULT_FN_ATTRS512
-_mm512_castsi512_si128 (__m512i __A)
-{
-  return (__m128i)__builtin_shufflevector(__A, __A , 0, 1);
-}
-
-static __inline __m256i __DEFAULT_FN_ATTRS512
-_mm512_castsi512_si256 (__m512i __A)
-{
-  return (__m256i)__builtin_shufflevector(__A, __A , 0, 1, 2, 3);
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_int2mask(int __a)
-{
-  return (__mmask16)__a;
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm512_mask2int(__mmask16 __a)
-{
-  return (int)__a;
-}
-
-/// Constructs a 512-bit floating-point vector of [8 x double] from a
-///    128-bit floating-point vector of [2 x double]. The lower 128 bits
-///    contain the value of the source vector. The upper 384 bits are set
-///    to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \returns A 512-bit floating-point vector of [8 x double]. The lower 128 bits
-///    contain the value of the parameter. The upper 384 bits are set to zero.
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_zextpd128_pd512(__m128d __a)
-{
-  return __builtin_shufflevector((__v2df)__a, (__v2df)_mm_setzero_pd(), 0, 1, 2, 3, 2, 3, 2, 3);
-}
-
-/// Constructs a 512-bit floating-point vector of [8 x double] from a
-///    256-bit floating-point vector of [4 x double]. The lower 256 bits
-///    contain the value of the source vector. The upper 256 bits are set
-///    to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 256-bit vector of [4 x double].
-/// \returns A 512-bit floating-point vector of [8 x double]. The lower 256 bits
-///    contain the value of the parameter. The upper 256 bits are set to zero.
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_zextpd256_pd512(__m256d __a)
-{
-  return __builtin_shufflevector((__v4df)__a, (__v4df)_mm256_setzero_pd(), 0, 1, 2, 3, 4, 5, 6, 7);
-}
-
-/// Constructs a 512-bit floating-point vector of [16 x float] from a
-///    128-bit floating-point vector of [4 x float]. The lower 128 bits contain
-///    the value of the source vector. The upper 384 bits are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \returns A 512-bit floating-point vector of [16 x float]. The lower 128 bits
-///    contain the value of the parameter. The upper 384 bits are set to zero.
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_zextps128_ps512(__m128 __a)
-{
-  return __builtin_shufflevector((__v4sf)__a, (__v4sf)_mm_setzero_ps(), 0, 1, 2, 3, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7);
-}
-
-/// Constructs a 512-bit floating-point vector of [16 x float] from a
-///    256-bit floating-point vector of [8 x float]. The lower 256 bits contain
-///    the value of the source vector. The upper 256 bits are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 256-bit vector of [8 x float].
-/// \returns A 512-bit floating-point vector of [16 x float]. The lower 256 bits
-///    contain the value of the parameter. The upper 256 bits are set to zero.
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_zextps256_ps512(__m256 __a)
-{
-  return __builtin_shufflevector((__v8sf)__a, (__v8sf)_mm256_setzero_ps(), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-}
-
-/// Constructs a 512-bit integer vector from a 128-bit integer vector.
-///    The lower 128 bits contain the value of the source vector. The upper
-///    384 bits are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \returns A 512-bit integer vector. The lower 128 bits contain the value of
-///    the parameter. The upper 384 bits are set to zero.
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_zextsi128_si512(__m128i __a)
-{
-  return __builtin_shufflevector((__v2di)__a, (__v2di)_mm_setzero_si128(), 0, 1, 2, 3, 2, 3, 2, 3);
-}
-
-/// Constructs a 512-bit integer vector from a 256-bit integer vector.
-///    The lower 256 bits contain the value of the source vector. The upper
-///    256 bits are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 256-bit integer vector.
-/// \returns A 512-bit integer vector. The lower 256 bits contain the value of
-///    the parameter. The upper 256 bits are set to zero.
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_zextsi256_si512(__m256i __a)
-{
-  return __builtin_shufflevector((__v4di)__a, (__v4di)_mm256_setzero_si256(), 0, 1, 2, 3, 4, 5, 6, 7);
-}
-
-/* Bitwise operators */
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_and_epi32(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v16su)__a & (__v16su)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_and_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k,
-                (__v16si) _mm512_and_epi32(__a, __b),
-                (__v16si) __src);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_and_epi32(__mmask16 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i) _mm512_mask_and_epi32(_mm512_setzero_si512 (),
-                                         __k, __a, __b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_and_epi64(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v8du)__a & (__v8du)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_and_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
-{
-    return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __k,
-                (__v8di) _mm512_and_epi64(__a, __b),
-                (__v8di) __src);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_and_epi64(__mmask8 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i) _mm512_mask_and_epi64(_mm512_setzero_si512 (),
-                                         __k, __a, __b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_andnot_si512 (__m512i __A, __m512i __B)
-{
-  return (__m512i)(~(__v8du)__A & (__v8du)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_andnot_epi32 (__m512i __A, __m512i __B)
-{
-  return (__m512i)(~(__v16su)__A & (__v16su)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_andnot_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                         (__v16si)_mm512_andnot_epi32(__A, __B),
-                                         (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_andnot_epi32(__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)_mm512_mask_andnot_epi32(_mm512_setzero_si512(),
-                                           __U, __A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_andnot_epi64(__m512i __A, __m512i __B)
-{
-  return (__m512i)(~(__v8du)__A & (__v8du)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_andnot_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                          (__v8di)_mm512_andnot_epi64(__A, __B),
-                                          (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_andnot_epi64(__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)_mm512_mask_andnot_epi64(_mm512_setzero_si512(),
-                                           __U, __A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_or_epi32(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v16su)__a | (__v16su)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_or_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k,
-                                             (__v16si)_mm512_or_epi32(__a, __b),
-                                             (__v16si)__src);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_or_epi32(__mmask16 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)_mm512_mask_or_epi32(_mm512_setzero_si512(), __k, __a, __b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_or_epi64(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v8du)__a | (__v8du)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_or_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__k,
-                                             (__v8di)_mm512_or_epi64(__a, __b),
-                                             (__v8di)__src);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_or_epi64(__mmask8 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)_mm512_mask_or_epi64(_mm512_setzero_si512(), __k, __a, __b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_xor_epi32(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v16su)__a ^ (__v16su)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_xor_epi32(__m512i __src, __mmask16 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__k,
-                                            (__v16si)_mm512_xor_epi32(__a, __b),
-                                            (__v16si)__src);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_xor_epi32(__mmask16 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)_mm512_mask_xor_epi32(_mm512_setzero_si512(), __k, __a, __b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_xor_epi64(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v8du)__a ^ (__v8du)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_xor_epi64(__m512i __src, __mmask8 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__k,
-                                             (__v8di)_mm512_xor_epi64(__a, __b),
-                                             (__v8di)__src);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_xor_epi64(__mmask8 __k, __m512i __a, __m512i __b)
-{
-  return (__m512i)_mm512_mask_xor_epi64(_mm512_setzero_si512(), __k, __a, __b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_and_si512(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v8du)__a & (__v8du)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_or_si512(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v8du)__a | (__v8du)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_xor_si512(__m512i __a, __m512i __b)
-{
-  return (__m512i)((__v8du)__a ^ (__v8du)__b);
-}
-
-/* Arithmetic */
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_add_pd(__m512d __a, __m512d __b)
-{
-  return (__m512d)((__v8df)__a + (__v8df)__b);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_add_ps(__m512 __a, __m512 __b)
-{
-  return (__m512)((__v16sf)__a + (__v16sf)__b);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_mul_pd(__m512d __a, __m512d __b)
-{
-  return (__m512d)((__v8df)__a * (__v8df)__b);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_mul_ps(__m512 __a, __m512 __b)
-{
-  return (__m512)((__v16sf)__a * (__v16sf)__b);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_sub_pd(__m512d __a, __m512d __b)
-{
-  return (__m512d)((__v8df)__a - (__v8df)__b);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_sub_ps(__m512 __a, __m512 __b)
-{
-  return (__m512)((__v16sf)__a - (__v16sf)__b);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_add_epi64 (__m512i __A, __m512i __B)
-{
-  return (__m512i) ((__v8du) __A + (__v8du) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_add_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_add_epi64(__A, __B),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_add_epi64(__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_add_epi64(__A, __B),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sub_epi64 (__m512i __A, __m512i __B)
-{
-  return (__m512i) ((__v8du) __A - (__v8du) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sub_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_sub_epi64(__A, __B),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sub_epi64(__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_sub_epi64(__A, __B),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_add_epi32 (__m512i __A, __m512i __B)
-{
-  return (__m512i) ((__v16su) __A + (__v16su) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_add_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                             (__v16si)_mm512_add_epi32(__A, __B),
-                                             (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_add_epi32 (__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                             (__v16si)_mm512_add_epi32(__A, __B),
-                                             (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sub_epi32 (__m512i __A, __m512i __B)
-{
-  return (__m512i) ((__v16su) __A - (__v16su) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sub_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                             (__v16si)_mm512_sub_epi32(__A, __B),
-                                             (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sub_epi32(__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                             (__v16si)_mm512_sub_epi32(__A, __B),
-                                             (__v16si)_mm512_setzero_si512());
-}
-
-#define _mm512_max_round_pd(A, B, R) \
-  (__m512d)__builtin_ia32_maxpd512((__v8df)(__m512d)(A), \
-                                   (__v8df)(__m512d)(B), (int)(R))
-
-#define _mm512_mask_max_round_pd(W, U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_max_round_pd((A), (B), (R)), \
-                                   (__v8df)(W))
-
-#define _mm512_maskz_max_round_pd(U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_max_round_pd((A), (B), (R)), \
-                                   (__v8df)_mm512_setzero_pd())
-
-static  __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_max_pd(__m512d __A, __m512d __B)
-{
-  return (__m512d) __builtin_ia32_maxpd512((__v8df) __A, (__v8df) __B,
-                                           _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_max_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                              (__v8df)_mm512_max_pd(__A, __B),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_pd (__mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                              (__v8df)_mm512_max_pd(__A, __B),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-#define _mm512_max_round_ps(A, B, R) \
-  (__m512)__builtin_ia32_maxps512((__v16sf)(__m512)(A), \
-                                  (__v16sf)(__m512)(B), (int)(R))
-
-#define _mm512_mask_max_round_ps(W, U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_max_round_ps((A), (B), (R)), \
-                                  (__v16sf)(W))
-
-#define _mm512_maskz_max_round_ps(U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_max_round_ps((A), (B), (R)), \
-                                  (__v16sf)_mm512_setzero_ps())
-
-static  __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_max_ps(__m512 __A, __m512 __B)
-{
-  return (__m512) __builtin_ia32_maxps512((__v16sf) __A, (__v16sf) __B,
-                                          _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_max_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                             (__v16sf)_mm512_max_ps(__A, __B),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_ps (__mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                             (__v16sf)_mm512_max_ps(__A, __B),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_max_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
-  return (__m128) __builtin_ia32_maxss_round_mask ((__v4sf) __A,
-                (__v4sf) __B,
-                (__v4sf) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_max_ss(__mmask8 __U,__m128 __A, __m128 __B) {
-  return (__m128) __builtin_ia32_maxss_round_mask ((__v4sf) __A,
-                (__v4sf) __B,
-                (__v4sf)  _mm_setzero_ps (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_max_round_ss(A, B, R) \
-  (__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)-1, (int)(R))
-
-#define _mm_mask_max_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                          (int)(R))
-
-#define _mm_maskz_max_round_ss(U, A, B, R) \
-  (__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)(U), (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_max_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
-  return (__m128d) __builtin_ia32_maxsd_round_mask ((__v2df) __A,
-                (__v2df) __B,
-                (__v2df) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_max_sd(__mmask8 __U,__m128d __A, __m128d __B) {
-  return (__m128d) __builtin_ia32_maxsd_round_mask ((__v2df) __A,
-                (__v2df) __B,
-                (__v2df)  _mm_setzero_pd (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_max_round_sd(A, B, R) \
-  (__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)-1, (int)(R))
-
-#define _mm_mask_max_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)(__m128d)(W), \
-                                           (__mmask8)(U), (int)(R))
-
-#define _mm_maskz_max_round_sd(U, A, B, R) \
-  (__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)(U), (int)(R))
-
-static __inline __m512i
-__DEFAULT_FN_ATTRS512
-_mm512_max_epi32(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmaxsd512((__v16si)__A, (__v16si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_max_epi32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                            (__v16si)_mm512_max_epi32(__A, __B),
-                                            (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_epi32 (__mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                            (__v16si)_mm512_max_epi32(__A, __B),
-                                            (__v16si)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_max_epu32(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmaxud512((__v16si)__A, (__v16si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_max_epu32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                            (__v16si)_mm512_max_epu32(__A, __B),
-                                            (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_epu32 (__mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                            (__v16si)_mm512_max_epu32(__A, __B),
-                                            (__v16si)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_max_epi64(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmaxsq512((__v8di)__A, (__v8di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_max_epi64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_max_epi64(__A, __B),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_epi64 (__mmask8 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_max_epi64(__A, __B),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_max_epu64(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pmaxuq512((__v8di)__A, (__v8di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_max_epu64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_max_epu64(__A, __B),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_max_epu64 (__mmask8 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_max_epu64(__A, __B),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-#define _mm512_min_round_pd(A, B, R) \
-  (__m512d)__builtin_ia32_minpd512((__v8df)(__m512d)(A), \
-                                   (__v8df)(__m512d)(B), (int)(R))
-
-#define _mm512_mask_min_round_pd(W, U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_min_round_pd((A), (B), (R)), \
-                                   (__v8df)(W))
-
-#define _mm512_maskz_min_round_pd(U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_min_round_pd((A), (B), (R)), \
-                                   (__v8df)_mm512_setzero_pd())
-
-static  __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_min_pd(__m512d __A, __m512d __B)
-{
-  return (__m512d) __builtin_ia32_minpd512((__v8df) __A, (__v8df) __B,
-                                           _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_min_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                              (__v8df)_mm512_min_pd(__A, __B),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_pd (__mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                              (__v8df)_mm512_min_pd(__A, __B),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-#define _mm512_min_round_ps(A, B, R) \
-  (__m512)__builtin_ia32_minps512((__v16sf)(__m512)(A), \
-                                  (__v16sf)(__m512)(B), (int)(R))
-
-#define _mm512_mask_min_round_ps(W, U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_min_round_ps((A), (B), (R)), \
-                                  (__v16sf)(W))
-
-#define _mm512_maskz_min_round_ps(U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_min_round_ps((A), (B), (R)), \
-                                  (__v16sf)_mm512_setzero_ps())
-
-static  __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_min_ps(__m512 __A, __m512 __B)
-{
-  return (__m512) __builtin_ia32_minps512((__v16sf) __A, (__v16sf) __B,
-                                          _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_min_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                             (__v16sf)_mm512_min_ps(__A, __B),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_ps (__mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                             (__v16sf)_mm512_min_ps(__A, __B),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_min_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
-  return (__m128) __builtin_ia32_minss_round_mask ((__v4sf) __A,
-                (__v4sf) __B,
-                (__v4sf) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_min_ss(__mmask8 __U,__m128 __A, __m128 __B) {
-  return (__m128) __builtin_ia32_minss_round_mask ((__v4sf) __A,
-                (__v4sf) __B,
-                (__v4sf)  _mm_setzero_ps (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_min_round_ss(A, B, R) \
-  (__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)-1, (int)(R))
-
-#define _mm_mask_min_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                          (int)(R))
-
-#define _mm_maskz_min_round_ss(U, A, B, R) \
-  (__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)(U), (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_min_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
-  return (__m128d) __builtin_ia32_minsd_round_mask ((__v2df) __A,
-                (__v2df) __B,
-                (__v2df) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_min_sd(__mmask8 __U,__m128d __A, __m128d __B) {
-  return (__m128d) __builtin_ia32_minsd_round_mask ((__v2df) __A,
-                (__v2df) __B,
-                (__v2df)  _mm_setzero_pd (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_min_round_sd(A, B, R) \
-  (__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)-1, (int)(R))
-
-#define _mm_mask_min_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)(__m128d)(W), \
-                                           (__mmask8)(U), (int)(R))
-
-#define _mm_maskz_min_round_sd(U, A, B, R) \
-  (__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)(U), (int)(R))
-
-static __inline __m512i
-__DEFAULT_FN_ATTRS512
-_mm512_min_epi32(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pminsd512((__v16si)__A, (__v16si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_min_epi32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                            (__v16si)_mm512_min_epi32(__A, __B),
-                                            (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_epi32 (__mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                            (__v16si)_mm512_min_epi32(__A, __B),
-                                            (__v16si)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_min_epu32(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pminud512((__v16si)__A, (__v16si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_min_epu32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                            (__v16si)_mm512_min_epu32(__A, __B),
-                                            (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_epu32 (__mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                            (__v16si)_mm512_min_epu32(__A, __B),
-                                            (__v16si)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_min_epi64(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pminsq512((__v8di)__A, (__v8di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_min_epi64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_min_epi64(__A, __B),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_epi64 (__mmask8 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_min_epi64(__A, __B),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_min_epu64(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_pminuq512((__v8di)__A, (__v8di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_min_epu64 (__m512i __W, __mmask8 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_min_epu64(__A, __B),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_min_epu64 (__mmask8 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_min_epu64(__A, __B),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mul_epi32(__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_pmuldq512((__v16si)__X, (__v16si) __Y);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mul_epi32(__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_mul_epi32(__X, __Y),
-                                             (__v8di)__W);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mul_epi32(__mmask8 __M, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_mul_epi32(__X, __Y),
-                                             (__v8di)_mm512_setzero_si512 ());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mul_epu32(__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_pmuludq512((__v16si)__X, (__v16si)__Y);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mul_epu32(__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_mul_epu32(__X, __Y),
-                                             (__v8di)__W);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mul_epu32(__mmask8 __M, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                             (__v8di)_mm512_mul_epu32(__X, __Y),
-                                             (__v8di)_mm512_setzero_si512 ());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mullo_epi32 (__m512i __A, __m512i __B)
-{
-  return (__m512i) ((__v16su) __A * (__v16su) __B);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mullo_epi32(__mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                             (__v16si)_mm512_mullo_epi32(__A, __B),
-                                             (__v16si)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mullo_epi32(__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                             (__v16si)_mm512_mullo_epi32(__A, __B),
-                                             (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mullox_epi64 (__m512i __A, __m512i __B) {
-  return (__m512i) ((__v8du) __A * (__v8du) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mullox_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) {
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_mullox_epi64(__A, __B),
-                                             (__v8di)__W);
-}
-
-#define _mm512_sqrt_round_pd(A, R) \
-  (__m512d)__builtin_ia32_sqrtpd512((__v8df)(__m512d)(A), (int)(R))
-
-#define _mm512_mask_sqrt_round_pd(W, U, A, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                       (__v8df)_mm512_sqrt_round_pd((A), (R)), \
-                                       (__v8df)(__m512d)(W))
-
-#define _mm512_maskz_sqrt_round_pd(U, A, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                       (__v8df)_mm512_sqrt_round_pd((A), (R)), \
-                                       (__v8df)_mm512_setzero_pd())
-
-static  __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_sqrt_pd(__m512d __A)
-{
-  return (__m512d)__builtin_ia32_sqrtpd512((__v8df)__A,
-                                           _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_sqrt_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                              (__v8df)_mm512_sqrt_pd(__A),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_sqrt_pd (__mmask8 __U, __m512d __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                              (__v8df)_mm512_sqrt_pd(__A),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-#define _mm512_sqrt_round_ps(A, R) \
-  (__m512)__builtin_ia32_sqrtps512((__v16sf)(__m512)(A), (int)(R))
-
-#define _mm512_mask_sqrt_round_ps(W, U, A, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                      (__v16sf)_mm512_sqrt_round_ps((A), (R)), \
-                                      (__v16sf)(__m512)(W))
-
-#define _mm512_maskz_sqrt_round_ps(U, A, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                      (__v16sf)_mm512_sqrt_round_ps((A), (R)), \
-                                      (__v16sf)_mm512_setzero_ps())
-
-static  __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_sqrt_ps(__m512 __A)
-{
-  return (__m512)__builtin_ia32_sqrtps512((__v16sf)__A,
-                                          _MM_FROUND_CUR_DIRECTION);
-}
-
-static  __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_sqrt_ps(__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                             (__v16sf)_mm512_sqrt_ps(__A),
-                                             (__v16sf)__W);
-}
-
-static  __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_sqrt_ps( __mmask16 __U, __m512 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                             (__v16sf)_mm512_sqrt_ps(__A),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-static  __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_rsqrt14_pd(__m512d __A)
-{
-  return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A,
-                 (__v8df)
-                 _mm512_setzero_pd (),
-                 (__mmask8) -1);}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_rsqrt14_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A,
-                  (__v8df) __W,
-                  (__mmask8) __U);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_rsqrt14_pd (__mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_rsqrt14pd512_mask ((__v8df) __A,
-                  (__v8df)
-                  _mm512_setzero_pd (),
-                  (__mmask8) __U);
-}
-
-static  __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_rsqrt14_ps(__m512 __A)
-{
-  return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A,
-                (__v16sf)
-                _mm512_setzero_ps (),
-                (__mmask16) -1);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_rsqrt14_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A,
-                 (__v16sf) __W,
-                 (__mmask16) __U);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_rsqrt14_ps (__mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_rsqrt14ps512_mask ((__v16sf) __A,
-                 (__v16sf)
-                 _mm512_setzero_ps (),
-                 (__mmask16) __U);
-}
-
-static  __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_rsqrt14_ss(__m128 __A, __m128 __B)
-{
-  return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A,
-             (__v4sf) __B,
-             (__v4sf)
-             _mm_setzero_ps (),
-             (__mmask8) -1);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_rsqrt14_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A,
-          (__v4sf) __B,
-          (__v4sf) __W,
-          (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_rsqrt14_ss (__mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_rsqrt14ss_mask ((__v4sf) __A,
-          (__v4sf) __B,
-          (__v4sf) _mm_setzero_ps (),
-          (__mmask8) __U);
-}
-
-static  __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_rsqrt14_sd(__m128d __A, __m128d __B)
-{
-  return (__m128d) __builtin_ia32_rsqrt14sd_mask ((__v2df) __A,
-              (__v2df) __B,
-              (__v2df)
-              _mm_setzero_pd (),
-              (__mmask8) -1);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_rsqrt14_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_rsqrt14sd_mask ( (__v2df) __A,
-          (__v2df) __B,
-          (__v2df) __W,
-          (__mmask8) __U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_rsqrt14_sd (__mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_rsqrt14sd_mask ( (__v2df) __A,
-          (__v2df) __B,
-          (__v2df) _mm_setzero_pd (),
-          (__mmask8) __U);
-}
-
-static  __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_rcp14_pd(__m512d __A)
-{
-  return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A,
-               (__v8df)
-               _mm512_setzero_pd (),
-               (__mmask8) -1);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_rcp14_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A,
-                (__v8df) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_rcp14_pd (__mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_rcp14pd512_mask ((__v8df) __A,
-                (__v8df)
-                _mm512_setzero_pd (),
-                (__mmask8) __U);
-}
-
-static  __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_rcp14_ps(__m512 __A)
-{
-  return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A,
-              (__v16sf)
-              _mm512_setzero_ps (),
-              (__mmask16) -1);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_rcp14_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A,
-                   (__v16sf) __W,
-                   (__mmask16) __U);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_rcp14_ps (__mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_rcp14ps512_mask ((__v16sf) __A,
-                   (__v16sf)
-                   _mm512_setzero_ps (),
-                   (__mmask16) __U);
-}
-
-static  __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_rcp14_ss(__m128 __A, __m128 __B)
-{
-  return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A,
-                 (__v4sf) __B,
-                 (__v4sf)
-                 _mm_setzero_ps (),
-                 (__mmask8) -1);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_rcp14_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A,
-          (__v4sf) __B,
-          (__v4sf) __W,
-          (__mmask8) __U);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_rcp14_ss (__mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_rcp14ss_mask ((__v4sf) __A,
-          (__v4sf) __B,
-          (__v4sf) _mm_setzero_ps (),
-          (__mmask8) __U);
-}
-
-static  __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_rcp14_sd(__m128d __A, __m128d __B)
-{
-  return (__m128d) __builtin_ia32_rcp14sd_mask ((__v2df) __A,
-            (__v2df) __B,
-            (__v2df)
-            _mm_setzero_pd (),
-            (__mmask8) -1);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_rcp14_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_rcp14sd_mask ( (__v2df) __A,
-          (__v2df) __B,
-          (__v2df) __W,
-          (__mmask8) __U);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_rcp14_sd (__mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_rcp14sd_mask ( (__v2df) __A,
-          (__v2df) __B,
-          (__v2df) _mm_setzero_pd (),
-          (__mmask8) __U);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_floor_ps(__m512 __A)
-{
-  return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
-                                                  _MM_FROUND_FLOOR,
-                                                  (__v16sf) __A, -1,
-                                                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_floor_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
-                   _MM_FROUND_FLOOR,
-                   (__v16sf) __W, __U,
-                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_floor_pd(__m512d __A)
-{
-  return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
-                                                   _MM_FROUND_FLOOR,
-                                                   (__v8df) __A, -1,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_floor_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
-                _MM_FROUND_FLOOR,
-                (__v8df) __W, __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_ceil_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
-                   _MM_FROUND_CEIL,
-                   (__v16sf) __W, __U,
-                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_ceil_ps(__m512 __A)
-{
-  return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
-                                                  _MM_FROUND_CEIL,
-                                                  (__v16sf) __A, -1,
-                                                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_ceil_pd(__m512d __A)
-{
-  return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
-                                                   _MM_FROUND_CEIL,
-                                                   (__v8df) __A, -1,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_ceil_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
-                _MM_FROUND_CEIL,
-                (__v8df) __W, __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_abs_epi64(__m512i __A)
-{
-  return (__m512i)__builtin_ia32_pabsq512((__v8di)__A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_abs_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_abs_epi64(__A),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_abs_epi64 (__mmask8 __U, __m512i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_abs_epi64(__A),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_abs_epi32(__m512i __A)
-{
-  return (__m512i)__builtin_ia32_pabsd512((__v16si) __A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_abs_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                                             (__v16si)_mm512_abs_epi32(__A),
-                                             (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_abs_epi32 (__mmask16 __U, __m512i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                                             (__v16si)_mm512_abs_epi32(__A),
-                                             (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_add_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
-  __A = _mm_add_ss(__A, __B);
-  return __builtin_ia32_selectss_128(__U, __A, __W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_add_ss(__mmask8 __U,__m128 __A, __m128 __B) {
-  __A = _mm_add_ss(__A, __B);
-  return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps());
-}
-
-#define _mm_add_round_ss(A, B, R) \
-  (__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)-1, (int)(R))
-
-#define _mm_mask_add_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                          (int)(R))
-
-#define _mm_maskz_add_round_ss(U, A, B, R) \
-  (__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)(U), (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_add_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
-  __A = _mm_add_sd(__A, __B);
-  return __builtin_ia32_selectsd_128(__U, __A, __W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_add_sd(__mmask8 __U,__m128d __A, __m128d __B) {
-  __A = _mm_add_sd(__A, __B);
-  return __builtin_ia32_selectsd_128(__U, __A, _mm_setzero_pd());
-}
-#define _mm_add_round_sd(A, B, R) \
-  (__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)-1, (int)(R))
-
-#define _mm_mask_add_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)(__m128d)(W), \
-                                           (__mmask8)(U), (int)(R))
-
-#define _mm_maskz_add_round_sd(U, A, B, R) \
-  (__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)(U), (int)(R))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_add_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_add_pd(__A, __B),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_add_pd(__mmask8 __U, __m512d __A, __m512d __B) {
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_add_pd(__A, __B),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_add_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_add_ps(__A, __B),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_add_ps(__mmask16 __U, __m512 __A, __m512 __B) {
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_add_ps(__A, __B),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-#define _mm512_add_round_pd(A, B, R) \
-  (__m512d)__builtin_ia32_addpd512((__v8df)(__m512d)(A), \
-                                   (__v8df)(__m512d)(B), (int)(R))
-
-#define _mm512_mask_add_round_pd(W, U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_add_round_pd((A), (B), (R)), \
-                                   (__v8df)(__m512d)(W))
-
-#define _mm512_maskz_add_round_pd(U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_add_round_pd((A), (B), (R)), \
-                                   (__v8df)_mm512_setzero_pd())
-
-#define _mm512_add_round_ps(A, B, R) \
-  (__m512)__builtin_ia32_addps512((__v16sf)(__m512)(A), \
-                                  (__v16sf)(__m512)(B), (int)(R))
-
-#define _mm512_mask_add_round_ps(W, U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_add_round_ps((A), (B), (R)), \
-                                  (__v16sf)(__m512)(W))
-
-#define _mm512_maskz_add_round_ps(U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_add_round_ps((A), (B), (R)), \
-                                  (__v16sf)_mm512_setzero_ps())
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_sub_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
-  __A = _mm_sub_ss(__A, __B);
-  return __builtin_ia32_selectss_128(__U, __A, __W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_sub_ss(__mmask8 __U,__m128 __A, __m128 __B) {
-  __A = _mm_sub_ss(__A, __B);
-  return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps());
-}
-#define _mm_sub_round_ss(A, B, R) \
-  (__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)-1, (int)(R))
-
-#define _mm_mask_sub_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                          (int)(R))
-
-#define _mm_maskz_sub_round_ss(U, A, B, R) \
-  (__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)(U), (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_sub_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
-  __A = _mm_sub_sd(__A, __B);
-  return __builtin_ia32_selectsd_128(__U, __A, __W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_sub_sd(__mmask8 __U,__m128d __A, __m128d __B) {
-  __A = _mm_sub_sd(__A, __B);
-  return __builtin_ia32_selectsd_128(__U, __A, _mm_setzero_pd());
-}
-
-#define _mm_sub_round_sd(A, B, R) \
-  (__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)-1, (int)(R))
-
-#define _mm_mask_sub_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)(__m128d)(W), \
-                                           (__mmask8)(U), (int)(R))
-
-#define _mm_maskz_sub_round_sd(U, A, B, R) \
-  (__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)(U), (int)(R))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_sub_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_sub_pd(__A, __B),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_sub_pd(__mmask8 __U, __m512d __A, __m512d __B) {
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_sub_pd(__A, __B),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_sub_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_sub_ps(__A, __B),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_sub_ps(__mmask16 __U, __m512 __A, __m512 __B) {
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_sub_ps(__A, __B),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-#define _mm512_sub_round_pd(A, B, R) \
-  (__m512d)__builtin_ia32_subpd512((__v8df)(__m512d)(A), \
-                                   (__v8df)(__m512d)(B), (int)(R))
-
-#define _mm512_mask_sub_round_pd(W, U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_sub_round_pd((A), (B), (R)), \
-                                   (__v8df)(__m512d)(W))
-
-#define _mm512_maskz_sub_round_pd(U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_sub_round_pd((A), (B), (R)), \
-                                   (__v8df)_mm512_setzero_pd())
-
-#define _mm512_sub_round_ps(A, B, R) \
-  (__m512)__builtin_ia32_subps512((__v16sf)(__m512)(A), \
-                                  (__v16sf)(__m512)(B), (int)(R))
-
-#define _mm512_mask_sub_round_ps(W, U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_sub_round_ps((A), (B), (R)), \
-                                  (__v16sf)(__m512)(W))
-
-#define _mm512_maskz_sub_round_ps(U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_sub_round_ps((A), (B), (R)), \
-                                  (__v16sf)_mm512_setzero_ps())
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_mul_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
-  __A = _mm_mul_ss(__A, __B);
-  return __builtin_ia32_selectss_128(__U, __A, __W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_mul_ss(__mmask8 __U,__m128 __A, __m128 __B) {
-  __A = _mm_mul_ss(__A, __B);
-  return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps());
-}
-#define _mm_mul_round_ss(A, B, R) \
-  (__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)-1, (int)(R))
-
-#define _mm_mask_mul_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                          (int)(R))
-
-#define _mm_maskz_mul_round_ss(U, A, B, R) \
-  (__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)(U), (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_mul_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
-  __A = _mm_mul_sd(__A, __B);
-  return __builtin_ia32_selectsd_128(__U, __A, __W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_mul_sd(__mmask8 __U,__m128d __A, __m128d __B) {
-  __A = _mm_mul_sd(__A, __B);
-  return __builtin_ia32_selectsd_128(__U, __A, _mm_setzero_pd());
-}
-
-#define _mm_mul_round_sd(A, B, R) \
-  (__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)-1, (int)(R))
-
-#define _mm_mask_mul_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)(__m128d)(W), \
-                                           (__mmask8)(U), (int)(R))
-
-#define _mm_maskz_mul_round_sd(U, A, B, R) \
-  (__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)(U), (int)(R))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_mul_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_mul_pd(__A, __B),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_mul_pd(__mmask8 __U, __m512d __A, __m512d __B) {
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_mul_pd(__A, __B),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_mul_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_mul_ps(__A, __B),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_mul_ps(__mmask16 __U, __m512 __A, __m512 __B) {
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_mul_ps(__A, __B),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-#define _mm512_mul_round_pd(A, B, R) \
-  (__m512d)__builtin_ia32_mulpd512((__v8df)(__m512d)(A), \
-                                   (__v8df)(__m512d)(B), (int)(R))
-
-#define _mm512_mask_mul_round_pd(W, U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_mul_round_pd((A), (B), (R)), \
-                                   (__v8df)(__m512d)(W))
-
-#define _mm512_maskz_mul_round_pd(U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_mul_round_pd((A), (B), (R)), \
-                                   (__v8df)_mm512_setzero_pd())
-
-#define _mm512_mul_round_ps(A, B, R) \
-  (__m512)__builtin_ia32_mulps512((__v16sf)(__m512)(A), \
-                                  (__v16sf)(__m512)(B), (int)(R))
-
-#define _mm512_mask_mul_round_ps(W, U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_mul_round_ps((A), (B), (R)), \
-                                  (__v16sf)(__m512)(W))
-
-#define _mm512_maskz_mul_round_ps(U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_mul_round_ps((A), (B), (R)), \
-                                  (__v16sf)_mm512_setzero_ps())
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_div_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
-  __A = _mm_div_ss(__A, __B);
-  return __builtin_ia32_selectss_128(__U, __A, __W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_div_ss(__mmask8 __U,__m128 __A, __m128 __B) {
-  __A = _mm_div_ss(__A, __B);
-  return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps());
-}
-
-#define _mm_div_round_ss(A, B, R) \
-  (__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)-1, (int)(R))
-
-#define _mm_mask_div_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                          (int)(R))
-
-#define _mm_maskz_div_round_ss(U, A, B, R) \
-  (__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4sf)_mm_setzero_ps(), \
-                                          (__mmask8)(U), (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_div_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
-  __A = _mm_div_sd(__A, __B);
-  return __builtin_ia32_selectsd_128(__U, __A, __W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_div_sd(__mmask8 __U,__m128d __A, __m128d __B) {
-  __A = _mm_div_sd(__A, __B);
-  return __builtin_ia32_selectsd_128(__U, __A, _mm_setzero_pd());
-}
-
-#define _mm_div_round_sd(A, B, R) \
-  (__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)-1, (int)(R))
-
-#define _mm_mask_div_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)(__m128d)(W), \
-                                           (__mmask8)(U), (int)(R))
-
-#define _mm_maskz_div_round_sd(U, A, B, R) \
-  (__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2df)_mm_setzero_pd(), \
-                                           (__mmask8)(U), (int)(R))
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_div_pd(__m512d __a, __m512d __b)
-{
-  return (__m512d)((__v8df)__a/(__v8df)__b);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_div_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_div_pd(__A, __B),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_div_pd(__mmask8 __U, __m512d __A, __m512d __B) {
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_div_pd(__A, __B),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_div_ps(__m512 __a, __m512 __b)
-{
-  return (__m512)((__v16sf)__a/(__v16sf)__b);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_div_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_div_ps(__A, __B),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_div_ps(__mmask16 __U, __m512 __A, __m512 __B) {
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_div_ps(__A, __B),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-#define _mm512_div_round_pd(A, B, R) \
-  (__m512d)__builtin_ia32_divpd512((__v8df)(__m512d)(A), \
-                                   (__v8df)(__m512d)(B), (int)(R))
-
-#define _mm512_mask_div_round_pd(W, U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_div_round_pd((A), (B), (R)), \
-                                   (__v8df)(__m512d)(W))
-
-#define _mm512_maskz_div_round_pd(U, A, B, R) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                   (__v8df)_mm512_div_round_pd((A), (B), (R)), \
-                                   (__v8df)_mm512_setzero_pd())
-
-#define _mm512_div_round_ps(A, B, R) \
-  (__m512)__builtin_ia32_divps512((__v16sf)(__m512)(A), \
-                                  (__v16sf)(__m512)(B), (int)(R))
-
-#define _mm512_mask_div_round_ps(W, U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_div_round_ps((A), (B), (R)), \
-                                  (__v16sf)(__m512)(W))
-
-#define _mm512_maskz_div_round_ps(U, A, B, R) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                  (__v16sf)_mm512_div_round_ps((A), (B), (R)), \
-                                  (__v16sf)_mm512_setzero_ps())
-
-#define _mm512_roundscale_ps(A, B) \
-  (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(B), \
-                                         (__v16sf)_mm512_undefined_ps(), \
-                                         (__mmask16)-1, \
-                                         _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_mask_roundscale_ps(A, B, C, imm) \
-  (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \
-                                         (__v16sf)(__m512)(A), (__mmask16)(B), \
-                                         _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_maskz_roundscale_ps(A, B, imm) \
-  (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \
-                                         (__v16sf)_mm512_setzero_ps(), \
-                                         (__mmask16)(A), \
-                                         _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_mask_roundscale_round_ps(A, B, C, imm, R) \
-  (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \
-                                         (__v16sf)(__m512)(A), (__mmask16)(B), \
-                                         (int)(R))
-
-#define _mm512_maskz_roundscale_round_ps(A, B, imm, R) \
-  (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \
-                                         (__v16sf)_mm512_setzero_ps(), \
-                                         (__mmask16)(A), (int)(R))
-
-#define _mm512_roundscale_round_ps(A, imm, R) \
-  (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(imm), \
-                                         (__v16sf)_mm512_undefined_ps(), \
-                                         (__mmask16)-1, (int)(R))
-
-#define _mm512_roundscale_pd(A, B) \
-  (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(B), \
-                                          (__v8df)_mm512_undefined_pd(), \
-                                          (__mmask8)-1, \
-                                          _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_mask_roundscale_pd(A, B, C, imm) \
-  (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \
-                                          (__v8df)(__m512d)(A), (__mmask8)(B), \
-                                          _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_maskz_roundscale_pd(A, B, imm) \
-  (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \
-                                          (__v8df)_mm512_setzero_pd(), \
-                                          (__mmask8)(A), \
-                                          _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_mask_roundscale_round_pd(A, B, C, imm, R) \
-  (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \
-                                          (__v8df)(__m512d)(A), (__mmask8)(B), \
-                                          (int)(R))
-
-#define _mm512_maskz_roundscale_round_pd(A, B, imm, R) \
-  (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \
-                                          (__v8df)_mm512_setzero_pd(), \
-                                          (__mmask8)(A), (int)(R))
-
-#define _mm512_roundscale_round_pd(A, imm, R) \
-  (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(imm), \
-                                          (__v8df)_mm512_undefined_pd(), \
-                                          (__mmask8)-1, (int)(R))
-
-#define _mm512_fmadd_round_pd(A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
-                                           (__v8df)(__m512d)(B), \
-                                           (__v8df)(__m512d)(C), \
-                                           (__mmask8)-1, (int)(R))
-
-
-#define _mm512_mask_fmadd_round_pd(A, U, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
-                                           (__v8df)(__m512d)(B), \
-                                           (__v8df)(__m512d)(C), \
-                                           (__mmask8)(U), (int)(R))
-
-
-#define _mm512_mask3_fmadd_round_pd(A, B, C, U, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_mask3((__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            (__v8df)(__m512d)(C), \
-                                            (__mmask8)(U), (int)(R))
-
-
-#define _mm512_maskz_fmadd_round_pd(U, A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            (__v8df)(__m512d)(C), \
-                                            (__mmask8)(U), (int)(R))
-
-
-#define _mm512_fmsub_round_pd(A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
-                                           (__v8df)(__m512d)(B), \
-                                           -(__v8df)(__m512d)(C), \
-                                           (__mmask8)-1, (int)(R))
-
-
-#define _mm512_mask_fmsub_round_pd(A, U, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
-                                           (__v8df)(__m512d)(B), \
-                                           -(__v8df)(__m512d)(C), \
-                                           (__mmask8)(U), (int)(R))
-
-
-#define _mm512_maskz_fmsub_round_pd(U, A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            -(__v8df)(__m512d)(C), \
-                                            (__mmask8)(U), (int)(R))
-
-
-#define _mm512_fnmadd_round_pd(A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \
-                                           (__v8df)(__m512d)(B), \
-                                           (__v8df)(__m512d)(C), \
-                                           (__mmask8)-1, (int)(R))
-
-
-#define _mm512_mask3_fnmadd_round_pd(A, B, C, U, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_mask3(-(__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            (__v8df)(__m512d)(C), \
-                                            (__mmask8)(U), (int)(R))
-
-
-#define _mm512_maskz_fnmadd_round_pd(U, A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            (__v8df)(__m512d)(C), \
-                                            (__mmask8)(U), (int)(R))
-
-
-#define _mm512_fnmsub_round_pd(A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \
-                                           (__v8df)(__m512d)(B), \
-                                           -(__v8df)(__m512d)(C), \
-                                           (__mmask8)-1, (int)(R))
-
-
-#define _mm512_maskz_fnmsub_round_pd(U, A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            -(__v8df)(__m512d)(C), \
-                                            (__mmask8)(U), (int)(R))
-
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_fmadd_pd(__m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
-                                                    (__v8df) __B,
-                                                    (__v8df) __C,
-                                                    (__mmask8) -1,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_fmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
-                                                    (__v8df) __B,
-                                                    (__v8df) __C,
-                                                    (__mmask8) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask3 ((__v8df) __A,
-                                                     (__v8df) __B,
-                                                     (__v8df) __C,
-                                                     (__mmask8) __U,
-                                                     _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_maskz ((__v8df) __A,
-                                                     (__v8df) __B,
-                                                     (__v8df) __C,
-                                                     (__mmask8) __U,
-                                                     _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_fmsub_pd(__m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
-                                                    (__v8df) __B,
-                                                    -(__v8df) __C,
-                                                    (__mmask8) -1,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_fmsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
-                                                    (__v8df) __B,
-                                                    -(__v8df) __C,
-                                                    (__mmask8) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_maskz ((__v8df) __A,
-                                                     (__v8df) __B,
-                                                     -(__v8df) __C,
-                                                     (__mmask8) __U,
-                                                     _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_fnmadd_pd(__m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
-                                                    -(__v8df) __B,
-                                                    (__v8df) __C,
-                                                    (__mmask8) -1,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask3_fnmadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask3 (-(__v8df) __A,
-                                                     (__v8df) __B,
-                                                     (__v8df) __C,
-                                                     (__mmask8) __U,
-                                                     _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_fnmadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_maskz (-(__v8df) __A,
-                                                     (__v8df) __B,
-                                                     (__v8df) __C,
-                                                     (__mmask8) __U,
-                                                     _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
-                                                    -(__v8df) __B,
-                                                    -(__v8df) __C,
-                                                    (__mmask8) -1,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_fnmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_maskz (-(__v8df) __A,
-                                                     (__v8df) __B,
-                                                     -(__v8df) __C,
-                                                     (__mmask8) __U,
-                                                     _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_fmadd_round_ps(A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)(__m512)(B), \
-                                          (__v16sf)(__m512)(C), \
-                                          (__mmask16)-1, (int)(R))
-
-
-#define _mm512_mask_fmadd_round_ps(A, U, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)(__m512)(B), \
-                                          (__v16sf)(__m512)(C), \
-                                          (__mmask16)(U), (int)(R))
-
-
-#define _mm512_mask3_fmadd_round_ps(A, B, C, U, R) \
-  (__m512)__builtin_ia32_vfmaddps512_mask3((__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           (__v16sf)(__m512)(C), \
-                                           (__mmask16)(U), (int)(R))
-
-
-#define _mm512_maskz_fmadd_round_ps(U, A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           (__v16sf)(__m512)(C), \
-                                           (__mmask16)(U), (int)(R))
-
-
-#define _mm512_fmsub_round_ps(A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)(__m512)(B), \
-                                          -(__v16sf)(__m512)(C), \
-                                          (__mmask16)-1, (int)(R))
-
-
-#define _mm512_mask_fmsub_round_ps(A, U, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)(__m512)(B), \
-                                          -(__v16sf)(__m512)(C), \
-                                          (__mmask16)(U), (int)(R))
-
-
-#define _mm512_maskz_fmsub_round_ps(U, A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           -(__v16sf)(__m512)(C), \
-                                           (__mmask16)(U), (int)(R))
-
-
-#define _mm512_fnmadd_round_ps(A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                          -(__v16sf)(__m512)(B), \
-                                          (__v16sf)(__m512)(C), \
-                                          (__mmask16)-1, (int)(R))
-
-
-#define _mm512_mask3_fnmadd_round_ps(A, B, C, U, R) \
-  (__m512)__builtin_ia32_vfmaddps512_mask3(-(__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           (__v16sf)(__m512)(C), \
-                                           (__mmask16)(U), (int)(R))
-
-
-#define _mm512_maskz_fnmadd_round_ps(U, A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           (__v16sf)(__m512)(C), \
-                                           (__mmask16)(U), (int)(R))
-
-
-#define _mm512_fnmsub_round_ps(A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                          -(__v16sf)(__m512)(B), \
-                                          -(__v16sf)(__m512)(C), \
-                                          (__mmask16)-1, (int)(R))
-
-
-#define _mm512_maskz_fnmsub_round_ps(U, A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           -(__v16sf)(__m512)(C), \
-                                           (__mmask16)(U), (int)(R))
-
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_fmadd_ps(__m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
-                                                   (__v16sf) __B,
-                                                   (__v16sf) __C,
-                                                   (__mmask16) -1,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_fmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
-                                                   (__v16sf) __B,
-                                                   (__v16sf) __C,
-                                                   (__mmask16) __U,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask3 ((__v16sf) __A,
-                                                    (__v16sf) __B,
-                                                    (__v16sf) __C,
-                                                    (__mmask16) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_maskz ((__v16sf) __A,
-                                                    (__v16sf) __B,
-                                                    (__v16sf) __C,
-                                                    (__mmask16) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_fmsub_ps(__m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
-                                                   (__v16sf) __B,
-                                                   -(__v16sf) __C,
-                                                   (__mmask16) -1,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_fmsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
-                                                   (__v16sf) __B,
-                                                   -(__v16sf) __C,
-                                                   (__mmask16) __U,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_maskz ((__v16sf) __A,
-                                                    (__v16sf) __B,
-                                                    -(__v16sf) __C,
-                                                    (__mmask16) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_fnmadd_ps(__m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
-                                                   -(__v16sf) __B,
-                                                   (__v16sf) __C,
-                                                   (__mmask16) -1,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask3_fnmadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask3 (-(__v16sf) __A,
-                                                    (__v16sf) __B,
-                                                    (__v16sf) __C,
-                                                    (__mmask16) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_fnmadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_maskz (-(__v16sf) __A,
-                                                    (__v16sf) __B,
-                                                    (__v16sf) __C,
-                                                    (__mmask16) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_fnmsub_ps(__m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
-                                                   -(__v16sf) __B,
-                                                   -(__v16sf) __C,
-                                                   (__mmask16) -1,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_fnmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_maskz (-(__v16sf) __A,
-                                                    (__v16sf) __B,
-                                                    -(__v16sf) __C,
-                                                    (__mmask16) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_fmaddsub_round_pd(A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
-                                              (__v8df)(__m512d)(B), \
-                                              (__v8df)(__m512d)(C), \
-                                              (__mmask8)-1, (int)(R))
-
-
-#define _mm512_mask_fmaddsub_round_pd(A, U, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
-                                              (__v8df)(__m512d)(B), \
-                                              (__v8df)(__m512d)(C), \
-                                              (__mmask8)(U), (int)(R))
-
-
-#define _mm512_mask3_fmaddsub_round_pd(A, B, C, U, R) \
-  (__m512d)__builtin_ia32_vfmaddsubpd512_mask3((__v8df)(__m512d)(A), \
-                                               (__v8df)(__m512d)(B), \
-                                               (__v8df)(__m512d)(C), \
-                                               (__mmask8)(U), (int)(R))
-
-
-#define _mm512_maskz_fmaddsub_round_pd(U, A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \
-                                               (__v8df)(__m512d)(B), \
-                                               (__v8df)(__m512d)(C), \
-                                               (__mmask8)(U), (int)(R))
-
-
-#define _mm512_fmsubadd_round_pd(A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
-                                              (__v8df)(__m512d)(B), \
-                                              -(__v8df)(__m512d)(C), \
-                                              (__mmask8)-1, (int)(R))
-
-
-#define _mm512_mask_fmsubadd_round_pd(A, U, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
-                                              (__v8df)(__m512d)(B), \
-                                              -(__v8df)(__m512d)(C), \
-                                              (__mmask8)(U), (int)(R))
-
-
-#define _mm512_maskz_fmsubadd_round_pd(U, A, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \
-                                               (__v8df)(__m512d)(B), \
-                                               -(__v8df)(__m512d)(C), \
-                                               (__mmask8)(U), (int)(R))
-
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_fmaddsub_pd(__m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
-                                                      (__v8df) __B,
-                                                      (__v8df) __C,
-                                                      (__mmask8) -1,
-                                                      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_fmaddsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
-                                                      (__v8df) __B,
-                                                      (__v8df) __C,
-                                                      (__mmask8) __U,
-                                                      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmaddsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
-{
-  return (__m512d) __builtin_ia32_vfmaddsubpd512_mask3 ((__v8df) __A,
-                                                       (__v8df) __B,
-                                                       (__v8df) __C,
-                                                       (__mmask8) __U,
-                                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmaddsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) __A,
-                                                       (__v8df) __B,
-                                                       (__v8df) __C,
-                                                       (__mmask8) __U,
-                                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_fmsubadd_pd(__m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
-                                                       (__v8df) __B,
-                                                       -(__v8df) __C,
-                                                       (__mmask8) -1,
-                                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_fmsubadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
-                                                       (__v8df) __B,
-                                                       -(__v8df) __C,
-                                                       (__mmask8) __U,
-                                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmsubadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) __A,
-                                                        (__v8df) __B,
-                                                        -(__v8df) __C,
-                                                        (__mmask8) __U,
-                                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_fmaddsub_round_ps(A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
-                                             (__v16sf)(__m512)(B), \
-                                             (__v16sf)(__m512)(C), \
-                                             (__mmask16)-1, (int)(R))
-
-
-#define _mm512_mask_fmaddsub_round_ps(A, U, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
-                                             (__v16sf)(__m512)(B), \
-                                             (__v16sf)(__m512)(C), \
-                                             (__mmask16)(U), (int)(R))
-
-
-#define _mm512_mask3_fmaddsub_round_ps(A, B, C, U, R) \
-  (__m512)__builtin_ia32_vfmaddsubps512_mask3((__v16sf)(__m512)(A), \
-                                              (__v16sf)(__m512)(B), \
-                                              (__v16sf)(__m512)(C), \
-                                              (__mmask16)(U), (int)(R))
-
-
-#define _mm512_maskz_fmaddsub_round_ps(U, A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \
-                                              (__v16sf)(__m512)(B), \
-                                              (__v16sf)(__m512)(C), \
-                                              (__mmask16)(U), (int)(R))
-
-
-#define _mm512_fmsubadd_round_ps(A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
-                                             (__v16sf)(__m512)(B), \
-                                             -(__v16sf)(__m512)(C), \
-                                             (__mmask16)-1, (int)(R))
-
-
-#define _mm512_mask_fmsubadd_round_ps(A, U, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
-                                             (__v16sf)(__m512)(B), \
-                                             -(__v16sf)(__m512)(C), \
-                                             (__mmask16)(U), (int)(R))
-
-
-#define _mm512_maskz_fmsubadd_round_ps(U, A, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \
-                                              (__v16sf)(__m512)(B), \
-                                              -(__v16sf)(__m512)(C), \
-                                              (__mmask16)(U), (int)(R))
-
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_fmaddsub_ps(__m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
-                                                      (__v16sf) __B,
-                                                      (__v16sf) __C,
-                                                      (__mmask16) -1,
-                                                      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_fmaddsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
-                                                      (__v16sf) __B,
-                                                      (__v16sf) __C,
-                                                      (__mmask16) __U,
-                                                      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmaddsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
-{
-  return (__m512) __builtin_ia32_vfmaddsubps512_mask3 ((__v16sf) __A,
-                                                       (__v16sf) __B,
-                                                       (__v16sf) __C,
-                                                       (__mmask16) __U,
-                                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmaddsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) __A,
-                                                       (__v16sf) __B,
-                                                       (__v16sf) __C,
-                                                       (__mmask16) __U,
-                                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_fmsubadd_ps(__m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
-                                                      (__v16sf) __B,
-                                                      -(__v16sf) __C,
-                                                      (__mmask16) -1,
-                                                      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_fmsubadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
-                                                      (__v16sf) __B,
-                                                      -(__v16sf) __C,
-                                                      (__mmask16) __U,
-                                                      _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_fmsubadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) __A,
-                                                       (__v16sf) __B,
-                                                       -(__v16sf) __C,
-                                                       (__mmask16) __U,
-                                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_mask3_fmsub_round_pd(A, B, C, U, R) \
-  (__m512d)__builtin_ia32_vfmsubpd512_mask3((__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            (__v8df)(__m512d)(C), \
-                                            (__mmask8)(U), (int)(R))
-
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
-{
-  return (__m512d)__builtin_ia32_vfmsubpd512_mask3 ((__v8df) __A,
-                                                    (__v8df) __B,
-                                                    (__v8df) __C,
-                                                    (__mmask8) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_mask3_fmsub_round_ps(A, B, C, U, R) \
-  (__m512)__builtin_ia32_vfmsubps512_mask3((__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           (__v16sf)(__m512)(C), \
-                                           (__mmask16)(U), (int)(R))
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
-{
-  return (__m512)__builtin_ia32_vfmsubps512_mask3 ((__v16sf) __A,
-                                                   (__v16sf) __B,
-                                                   (__v16sf) __C,
-                                                   (__mmask16) __U,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_mask3_fmsubadd_round_pd(A, B, C, U, R) \
-  (__m512d)__builtin_ia32_vfmsubaddpd512_mask3((__v8df)(__m512d)(A), \
-                                               (__v8df)(__m512d)(B), \
-                                               (__v8df)(__m512d)(C), \
-                                               (__mmask8)(U), (int)(R))
-
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmsubadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
-{
-  return (__m512d)__builtin_ia32_vfmsubaddpd512_mask3 ((__v8df) __A,
-                                                       (__v8df) __B,
-                                                       (__v8df) __C,
-                                                       (__mmask8) __U,
-                                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_mask3_fmsubadd_round_ps(A, B, C, U, R) \
-  (__m512)__builtin_ia32_vfmsubaddps512_mask3((__v16sf)(__m512)(A), \
-                                              (__v16sf)(__m512)(B), \
-                                              (__v16sf)(__m512)(C), \
-                                              (__mmask16)(U), (int)(R))
-
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask3_fmsubadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
-{
-  return (__m512)__builtin_ia32_vfmsubaddps512_mask3 ((__v16sf) __A,
-                                                      (__v16sf) __B,
-                                                      (__v16sf) __C,
-                                                      (__mmask16) __U,
-                                                      _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_mask_fnmadd_round_pd(A, U, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
-                                           -(__v8df)(__m512d)(B), \
-                                           (__v8df)(__m512d)(C), \
-                                           (__mmask8)(U), (int)(R))
-
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_fnmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
-                                                    -(__v8df) __B,
-                                                    (__v8df) __C,
-                                                    (__mmask8) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_mask_fnmadd_round_ps(A, U, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                          -(__v16sf)(__m512)(B), \
-                                          (__v16sf)(__m512)(C), \
-                                          (__mmask16)(U), (int)(R))
-
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_fnmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
-                                                   -(__v16sf) __B,
-                                                   (__v16sf) __C,
-                                                   (__mmask16) __U,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_mask_fnmsub_round_pd(A, U, B, C, R) \
-  (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
-                                           -(__v8df)(__m512d)(B), \
-                                           -(__v8df)(__m512d)(C), \
-                                           (__mmask8)(U), (int)(R))
-
-
-#define _mm512_mask3_fnmsub_round_pd(A, B, C, U, R) \
-  (__m512d)__builtin_ia32_vfmsubpd512_mask3(-(__v8df)(__m512d)(A), \
-                                            (__v8df)(__m512d)(B), \
-                                            (__v8df)(__m512d)(C), \
-                                            (__mmask8)(U), (int)(R))
-
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_fnmsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
-{
-  return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
-                                                    -(__v8df) __B,
-                                                    -(__v8df) __C,
-                                                    (__mmask8) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask3_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
-{
-  return (__m512d) __builtin_ia32_vfmsubpd512_mask3 (-(__v8df) __A,
-                                                     (__v8df) __B,
-                                                     (__v8df) __C,
-                                                     (__mmask8) __U,
-                                                     _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_mask_fnmsub_round_ps(A, U, B, C, R) \
-  (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
-                                          -(__v16sf)(__m512)(B), \
-                                          -(__v16sf)(__m512)(C), \
-                                          (__mmask16)(U), (int)(R))
-
-
-#define _mm512_mask3_fnmsub_round_ps(A, B, C, U, R) \
-  (__m512)__builtin_ia32_vfmsubps512_mask3(-(__v16sf)(__m512)(A), \
-                                           (__v16sf)(__m512)(B), \
-                                           (__v16sf)(__m512)(C), \
-                                           (__mmask16)(U), (int)(R))
-
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_fnmsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
-{
-  return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
-                                                   -(__v16sf) __B,
-                                                   -(__v16sf) __C,
-                                                   (__mmask16) __U,
-                                                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask3_fnmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
-{
-  return (__m512) __builtin_ia32_vfmsubps512_mask3 (-(__v16sf) __A,
-                                                    (__v16sf) __B,
-                                                    (__v16sf) __C,
-                                                    (__mmask16) __U,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-
-
-/* Vector permutations */
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_permutex2var_epi32(__m512i __A, __m512i __I, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_vpermi2vard512((__v16si)__A, (__v16si) __I,
-                                                (__v16si) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_permutex2var_epi32(__m512i __A, __mmask16 __U, __m512i __I,
-                               __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                              (__v16si)_mm512_permutex2var_epi32(__A, __I, __B),
-                              (__v16si)__A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask2_permutex2var_epi32(__m512i __A, __m512i __I, __mmask16 __U,
-                                __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                              (__v16si)_mm512_permutex2var_epi32(__A, __I, __B),
-                              (__v16si)__I);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutex2var_epi32(__mmask16 __U, __m512i __A, __m512i __I,
-                                __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                              (__v16si)_mm512_permutex2var_epi32(__A, __I, __B),
-                              (__v16si)_mm512_setzero_si512());
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_permutex2var_epi64(__m512i __A, __m512i __I, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_vpermi2varq512((__v8di)__A, (__v8di) __I,
-                                                (__v8di) __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_permutex2var_epi64(__m512i __A, __mmask8 __U, __m512i __I,
-                               __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__U,
-                               (__v8di)_mm512_permutex2var_epi64(__A, __I, __B),
-                               (__v8di)__A);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask2_permutex2var_epi64(__m512i __A, __m512i __I, __mmask8 __U,
-                                __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__U,
-                               (__v8di)_mm512_permutex2var_epi64(__A, __I, __B),
-                               (__v8di)__I);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutex2var_epi64(__mmask8 __U, __m512i __A, __m512i __I,
-                                __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__U,
-                               (__v8di)_mm512_permutex2var_epi64(__A, __I, __B),
-                               (__v8di)_mm512_setzero_si512());
-}
-
-#define _mm512_alignr_epi64(A, B, I) \
-  (__m512i)__builtin_ia32_alignq512((__v8di)(__m512i)(A), \
-                                    (__v8di)(__m512i)(B), (int)(I))
-
-#define _mm512_mask_alignr_epi64(W, U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                 (__v8di)_mm512_alignr_epi64((A), (B), (imm)), \
-                                 (__v8di)(__m512i)(W))
-
-#define _mm512_maskz_alignr_epi64(U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                 (__v8di)_mm512_alignr_epi64((A), (B), (imm)), \
-                                 (__v8di)_mm512_setzero_si512())
-
-#define _mm512_alignr_epi32(A, B, I) \
-  (__m512i)__builtin_ia32_alignd512((__v16si)(__m512i)(A), \
-                                    (__v16si)(__m512i)(B), (int)(I))
-
-#define _mm512_mask_alignr_epi32(W, U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                (__v16si)_mm512_alignr_epi32((A), (B), (imm)), \
-                                (__v16si)(__m512i)(W))
-
-#define _mm512_maskz_alignr_epi32(U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                (__v16si)_mm512_alignr_epi32((A), (B), (imm)), \
-                                (__v16si)_mm512_setzero_si512())
-/* Vector Extract */
-
-#define _mm512_extractf64x4_pd(A, I) \
-  (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(I), \
-                                            (__v4df)_mm256_undefined_pd(), \
-                                            (__mmask8)-1)
-
-#define _mm512_mask_extractf64x4_pd(W, U, A, imm) \
-  (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \
-                                            (__v4df)(__m256d)(W), \
-                                            (__mmask8)(U))
-
-#define _mm512_maskz_extractf64x4_pd(U, A, imm) \
-  (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \
-                                            (__v4df)_mm256_setzero_pd(), \
-                                            (__mmask8)(U))
-
-#define _mm512_extractf32x4_ps(A, I) \
-  (__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(I), \
-                                           (__v4sf)_mm_undefined_ps(), \
-                                           (__mmask8)-1)
-
-#define _mm512_mask_extractf32x4_ps(W, U, A, imm) \
-  (__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \
-                                           (__v4sf)(__m128)(W), \
-                                           (__mmask8)(U))
-
-#define _mm512_maskz_extractf32x4_ps(U, A, imm) \
-  (__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \
-                                           (__v4sf)_mm_setzero_ps(), \
-                                           (__mmask8)(U))
-
-/* Vector Blend */
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_blend_pd(__mmask8 __U, __m512d __A, __m512d __W)
-{
-  return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U,
-                 (__v8df) __W,
-                 (__v8df) __A);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_blend_ps(__mmask16 __U, __m512 __A, __m512 __W)
-{
-  return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U,
-                (__v16sf) __W,
-                (__v16sf) __A);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_blend_epi64(__mmask8 __U, __m512i __A, __m512i __W)
-{
-  return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U,
-                (__v8di) __W,
-                (__v8di) __A);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_blend_epi32(__mmask16 __U, __m512i __A, __m512i __W)
-{
-  return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U,
-                (__v16si) __W,
-                (__v16si) __A);
-}
-
-/* Compare */
-
-#define _mm512_cmp_round_ps_mask(A, B, P, R) \
-  (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)(__m512)(B), (int)(P), \
-                                          (__mmask16)-1, (int)(R))
-
-#define _mm512_mask_cmp_round_ps_mask(U, A, B, P, R) \
-  (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)(__m512)(B), (int)(P), \
-                                          (__mmask16)(U), (int)(R))
-
-#define _mm512_cmp_ps_mask(A, B, P) \
-  _mm512_cmp_round_ps_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION)
-#define _mm512_mask_cmp_ps_mask(U, A, B, P) \
-  _mm512_mask_cmp_round_ps_mask((U), (A), (B), (P), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_cmpeq_ps_mask(A, B) \
-    _mm512_cmp_ps_mask((A), (B), _CMP_EQ_OQ)
-#define _mm512_mask_cmpeq_ps_mask(k, A, B) \
-    _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_EQ_OQ)
-
-#define _mm512_cmplt_ps_mask(A, B) \
-    _mm512_cmp_ps_mask((A), (B), _CMP_LT_OS)
-#define _mm512_mask_cmplt_ps_mask(k, A, B) \
-    _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_LT_OS)
-
-#define _mm512_cmple_ps_mask(A, B) \
-    _mm512_cmp_ps_mask((A), (B), _CMP_LE_OS)
-#define _mm512_mask_cmple_ps_mask(k, A, B) \
-    _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_LE_OS)
-
-#define _mm512_cmpunord_ps_mask(A, B) \
-    _mm512_cmp_ps_mask((A), (B), _CMP_UNORD_Q)
-#define _mm512_mask_cmpunord_ps_mask(k, A, B) \
-    _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_UNORD_Q)
-
-#define _mm512_cmpneq_ps_mask(A, B) \
-    _mm512_cmp_ps_mask((A), (B), _CMP_NEQ_UQ)
-#define _mm512_mask_cmpneq_ps_mask(k, A, B) \
-    _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_NEQ_UQ)
-
-#define _mm512_cmpnlt_ps_mask(A, B) \
-    _mm512_cmp_ps_mask((A), (B), _CMP_NLT_US)
-#define _mm512_mask_cmpnlt_ps_mask(k, A, B) \
-    _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_NLT_US)
-
-#define _mm512_cmpnle_ps_mask(A, B) \
-    _mm512_cmp_ps_mask((A), (B), _CMP_NLE_US)
-#define _mm512_mask_cmpnle_ps_mask(k, A, B) \
-    _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_NLE_US)
-
-#define _mm512_cmpord_ps_mask(A, B) \
-    _mm512_cmp_ps_mask((A), (B), _CMP_ORD_Q)
-#define _mm512_mask_cmpord_ps_mask(k, A, B) \
-    _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_ORD_Q)
-
-#define _mm512_cmp_round_pd_mask(A, B, P, R) \
-  (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \
-                                         (__v8df)(__m512d)(B), (int)(P), \
-                                         (__mmask8)-1, (int)(R))
-
-#define _mm512_mask_cmp_round_pd_mask(U, A, B, P, R) \
-  (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \
-                                         (__v8df)(__m512d)(B), (int)(P), \
-                                         (__mmask8)(U), (int)(R))
-
-#define _mm512_cmp_pd_mask(A, B, P) \
-  _mm512_cmp_round_pd_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION)
-#define _mm512_mask_cmp_pd_mask(U, A, B, P) \
-  _mm512_mask_cmp_round_pd_mask((U), (A), (B), (P), _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_cmpeq_pd_mask(A, B) \
-    _mm512_cmp_pd_mask((A), (B), _CMP_EQ_OQ)
-#define _mm512_mask_cmpeq_pd_mask(k, A, B) \
-    _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_EQ_OQ)
-
-#define _mm512_cmplt_pd_mask(A, B) \
-    _mm512_cmp_pd_mask((A), (B), _CMP_LT_OS)
-#define _mm512_mask_cmplt_pd_mask(k, A, B) \
-    _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_LT_OS)
-
-#define _mm512_cmple_pd_mask(A, B) \
-    _mm512_cmp_pd_mask((A), (B), _CMP_LE_OS)
-#define _mm512_mask_cmple_pd_mask(k, A, B) \
-    _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_LE_OS)
-
-#define _mm512_cmpunord_pd_mask(A, B) \
-    _mm512_cmp_pd_mask((A), (B), _CMP_UNORD_Q)
-#define _mm512_mask_cmpunord_pd_mask(k, A, B) \
-    _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_UNORD_Q)
-
-#define _mm512_cmpneq_pd_mask(A, B) \
-    _mm512_cmp_pd_mask((A), (B), _CMP_NEQ_UQ)
-#define _mm512_mask_cmpneq_pd_mask(k, A, B) \
-    _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_NEQ_UQ)
-
-#define _mm512_cmpnlt_pd_mask(A, B) \
-    _mm512_cmp_pd_mask((A), (B), _CMP_NLT_US)
-#define _mm512_mask_cmpnlt_pd_mask(k, A, B) \
-    _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_NLT_US)
-
-#define _mm512_cmpnle_pd_mask(A, B) \
-    _mm512_cmp_pd_mask((A), (B), _CMP_NLE_US)
-#define _mm512_mask_cmpnle_pd_mask(k, A, B) \
-    _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_NLE_US)
-
-#define _mm512_cmpord_pd_mask(A, B) \
-    _mm512_cmp_pd_mask((A), (B), _CMP_ORD_Q)
-#define _mm512_mask_cmpord_pd_mask(k, A, B) \
-    _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_ORD_Q)
-
-/* Conversion */
-
-#define _mm512_cvtt_roundps_epu32(A, R) \
-  (__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
-                                             (__v16si)_mm512_undefined_epi32(), \
-                                             (__mmask16)-1, (int)(R))
-
-#define _mm512_mask_cvtt_roundps_epu32(W, U, A, R) \
-  (__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
-                                             (__v16si)(__m512i)(W), \
-                                             (__mmask16)(U), (int)(R))
-
-#define _mm512_maskz_cvtt_roundps_epu32(U, A, R) \
-  (__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
-                                             (__v16si)_mm512_setzero_si512(), \
-                                             (__mmask16)(U), (int)(R))
-
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvttps_epu32(__m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A,
-                  (__v16si)
-                  _mm512_setzero_si512 (),
-                  (__mmask16) -1,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvttps_epu32 (__m512i __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A,
-                   (__v16si) __W,
-                   (__mmask16) __U,
-                   _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvttps_epu32 (__mmask16 __U, __m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvttps2udq512_mask ((__v16sf) __A,
-                   (__v16si) _mm512_setzero_si512 (),
-                   (__mmask16) __U,
-                   _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvt_roundepi32_ps(A, R) \
-  (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
-                                          (__v16sf)_mm512_setzero_ps(), \
-                                          (__mmask16)-1, (int)(R))
-
-#define _mm512_mask_cvt_roundepi32_ps(W, U, A, R) \
-  (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
-                                          (__v16sf)(__m512)(W), \
-                                          (__mmask16)(U), (int)(R))
-
-#define _mm512_maskz_cvt_roundepi32_ps(U, A, R) \
-  (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
-                                          (__v16sf)_mm512_setzero_ps(), \
-                                          (__mmask16)(U), (int)(R))
-
-#define _mm512_cvt_roundepu32_ps(A, R) \
-  (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
-                                           (__v16sf)_mm512_setzero_ps(), \
-                                           (__mmask16)-1, (int)(R))
-
-#define _mm512_mask_cvt_roundepu32_ps(W, U, A, R) \
-  (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
-                                           (__v16sf)(__m512)(W), \
-                                           (__mmask16)(U), (int)(R))
-
-#define _mm512_maskz_cvt_roundepu32_ps(U, A, R) \
-  (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
-                                           (__v16sf)_mm512_setzero_ps(), \
-                                           (__mmask16)(U), (int)(R))
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_cvtepu32_ps (__m512i __A)
-{
-  return (__m512)__builtin_convertvector((__v16su)__A, __v16sf);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu32_ps (__m512 __W, __mmask16 __U, __m512i __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_cvtepu32_ps(__A),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu32_ps (__mmask16 __U, __m512i __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_cvtepu32_ps(__A),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_cvtepi32_pd(__m256i __A)
-{
-  return (__m512d)__builtin_convertvector((__v8si)__A, __v8df);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32_pd (__m512d __W, __mmask8 __U, __m256i __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
-                                              (__v8df)_mm512_cvtepi32_pd(__A),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi32_pd (__mmask8 __U, __m256i __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
-                                              (__v8df)_mm512_cvtepi32_pd(__A),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_cvtepi32lo_pd(__m512i __A)
-{
-  return (__m512d) _mm512_cvtepi32_pd(_mm512_castsi512_si256(__A));
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32lo_pd(__m512d __W, __mmask8 __U,__m512i __A)
-{
-  return (__m512d) _mm512_mask_cvtepi32_pd(__W, __U, _mm512_castsi512_si256(__A));
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_cvtepi32_ps (__m512i __A)
-{
-  return (__m512)__builtin_convertvector((__v16si)__A, __v16sf);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32_ps (__m512 __W, __mmask16 __U, __m512i __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_cvtepi32_ps(__A),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi32_ps (__mmask16 __U, __m512i __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_cvtepi32_ps(__A),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_cvtepu32_pd(__m256i __A)
-{
-  return (__m512d)__builtin_convertvector((__v8su)__A, __v8df);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu32_pd (__m512d __W, __mmask8 __U, __m256i __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
-                                              (__v8df)_mm512_cvtepu32_pd(__A),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu32_pd (__mmask8 __U, __m256i __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
-                                              (__v8df)_mm512_cvtepu32_pd(__A),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_cvtepu32lo_pd(__m512i __A)
-{
-  return (__m512d) _mm512_cvtepu32_pd(_mm512_castsi512_si256(__A));
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu32lo_pd(__m512d __W, __mmask8 __U,__m512i __A)
-{
-  return (__m512d) _mm512_mask_cvtepu32_pd(__W, __U, _mm512_castsi512_si256(__A));
-}
-
-#define _mm512_cvt_roundpd_ps(A, R) \
-  (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
-                                          (__v8sf)_mm256_setzero_ps(), \
-                                          (__mmask8)-1, (int)(R))
-
-#define _mm512_mask_cvt_roundpd_ps(W, U, A, R) \
-  (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
-                                          (__v8sf)(__m256)(W), (__mmask8)(U), \
-                                          (int)(R))
-
-#define _mm512_maskz_cvt_roundpd_ps(U, A, R) \
-  (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
-                                          (__v8sf)_mm256_setzero_ps(), \
-                                          (__mmask8)(U), (int)(R))
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS512
-_mm512_cvtpd_ps (__m512d __A)
-{
-  return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A,
-                (__v8sf) _mm256_undefined_ps (),
-                (__mmask8) -1,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtpd_ps (__m256 __W, __mmask8 __U, __m512d __A)
-{
-  return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A,
-                (__v8sf) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256 __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtpd_ps (__mmask8 __U, __m512d __A)
-{
-  return (__m256) __builtin_ia32_cvtpd2ps512_mask ((__v8df) __A,
-                (__v8sf) _mm256_setzero_ps (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_cvtpd_pslo (__m512d __A)
-{
-  return (__m512) __builtin_shufflevector((__v8sf) _mm512_cvtpd_ps(__A),
-                (__v8sf) _mm256_setzero_ps (),
-                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtpd_pslo (__m512 __W, __mmask8 __U,__m512d __A)
-{
-  return (__m512) __builtin_shufflevector (
-                (__v8sf) _mm512_mask_cvtpd_ps (_mm512_castps512_ps256(__W),
-                                               __U, __A),
-                (__v8sf) _mm256_setzero_ps (),
-                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-}
-
-#define _mm512_cvt_roundps_ph(A, I) \
-  (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
-                                            (__v16hi)_mm256_undefined_si256(), \
-                                            (__mmask16)-1)
-
-#define _mm512_mask_cvt_roundps_ph(U, W, A, I) \
-  (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
-                                            (__v16hi)(__m256i)(U), \
-                                            (__mmask16)(W))
-
-#define _mm512_maskz_cvt_roundps_ph(W, A, I) \
-  (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
-                                            (__v16hi)_mm256_setzero_si256(), \
-                                            (__mmask16)(W))
-
-#define _mm512_cvtps_ph       _mm512_cvt_roundps_ph
-#define _mm512_mask_cvtps_ph  _mm512_mask_cvt_roundps_ph
-#define _mm512_maskz_cvtps_ph _mm512_maskz_cvt_roundps_ph
-
-#define _mm512_cvt_roundph_ps(A, R) \
-  (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
-                                           (__v16sf)_mm512_undefined_ps(), \
-                                           (__mmask16)-1, (int)(R))
-
-#define _mm512_mask_cvt_roundph_ps(W, U, A, R) \
-  (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
-                                           (__v16sf)(__m512)(W), \
-                                           (__mmask16)(U), (int)(R))
-
-#define _mm512_maskz_cvt_roundph_ps(U, A, R) \
-  (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
-                                           (__v16sf)_mm512_setzero_ps(), \
-                                           (__mmask16)(U), (int)(R))
-
-
-static  __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_cvtph_ps(__m256i __A)
-{
-  return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A,
-                (__v16sf)
-                _mm512_setzero_ps (),
-                (__mmask16) -1,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtph_ps (__m512 __W, __mmask16 __U, __m256i __A)
-{
-  return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A,
-                 (__v16sf) __W,
-                 (__mmask16) __U,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtph_ps (__mmask16 __U, __m256i __A)
-{
-  return (__m512) __builtin_ia32_vcvtph2ps512_mask ((__v16hi) __A,
-                 (__v16sf) _mm512_setzero_ps (),
-                 (__mmask16) __U,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvtt_roundpd_epi32(A, R) \
-  (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
-                                            (__v8si)_mm256_setzero_si256(), \
-                                            (__mmask8)-1, (int)(R))
-
-#define _mm512_mask_cvtt_roundpd_epi32(W, U, A, R) \
-  (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
-                                            (__v8si)(__m256i)(W), \
-                                            (__mmask8)(U), (int)(R))
-
-#define _mm512_maskz_cvtt_roundpd_epi32(U, A, R) \
-  (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
-                                            (__v8si)_mm256_setzero_si256(), \
-                                            (__mmask8)(U), (int)(R))
-
-static __inline __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvttpd_epi32(__m512d __a)
-{
-  return (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df) __a,
-                                                   (__v8si)_mm256_setzero_si256(),
-                                                   (__mmask8) -1,
-                                                    _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvttpd_epi32 (__m256i __W, __mmask8 __U, __m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvttpd2dq512_mask ((__v8df) __A,
-                  (__v8si) __W,
-                  (__mmask8) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvttpd_epi32 (__mmask8 __U, __m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvttpd2dq512_mask ((__v8df) __A,
-                  (__v8si) _mm256_setzero_si256 (),
-                  (__mmask8) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvtt_roundps_epi32(A, R) \
-  (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
-                                            (__v16si)_mm512_setzero_si512(), \
-                                            (__mmask16)-1, (int)(R))
-
-#define _mm512_mask_cvtt_roundps_epi32(W, U, A, R) \
-  (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
-                                            (__v16si)(__m512i)(W), \
-                                            (__mmask16)(U), (int)(R))
-
-#define _mm512_maskz_cvtt_roundps_epi32(U, A, R) \
-  (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
-                                            (__v16si)_mm512_setzero_si512(), \
-                                            (__mmask16)(U), (int)(R))
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvttps_epi32(__m512 __a)
-{
-  return (__m512i)
-    __builtin_ia32_cvttps2dq512_mask((__v16sf) __a,
-                                     (__v16si) _mm512_setzero_si512 (),
-                                     (__mmask16) -1, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvttps_epi32 (__m512i __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvttps2dq512_mask ((__v16sf) __A,
-                  (__v16si) __W,
-                  (__mmask16) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvttps_epi32 (__mmask16 __U, __m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvttps2dq512_mask ((__v16sf) __A,
-                  (__v16si) _mm512_setzero_si512 (),
-                  (__mmask16) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvt_roundps_epi32(A, R) \
-  (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
-                                           (__v16si)_mm512_setzero_si512(), \
-                                           (__mmask16)-1, (int)(R))
-
-#define _mm512_mask_cvt_roundps_epi32(W, U, A, R) \
-  (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
-                                           (__v16si)(__m512i)(W), \
-                                           (__mmask16)(U), (int)(R))
-
-#define _mm512_maskz_cvt_roundps_epi32(U, A, R) \
-  (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
-                                           (__v16si)_mm512_setzero_si512(), \
-                                           (__mmask16)(U), (int)(R))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtps_epi32 (__m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A,
-                 (__v16si) _mm512_undefined_epi32 (),
-                 (__mmask16) -1,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtps_epi32 (__m512i __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A,
-                 (__v16si) __W,
-                 (__mmask16) __U,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtps_epi32 (__mmask16 __U, __m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A,
-                 (__v16si)
-                 _mm512_setzero_si512 (),
-                 (__mmask16) __U,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvt_roundpd_epi32(A, R) \
-  (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
-                                           (__v8si)_mm256_setzero_si256(), \
-                                           (__mmask8)-1, (int)(R))
-
-#define _mm512_mask_cvt_roundpd_epi32(W, U, A, R) \
-  (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
-                                           (__v8si)(__m256i)(W), \
-                                           (__mmask8)(U), (int)(R))
-
-#define _mm512_maskz_cvt_roundpd_epi32(U, A, R) \
-  (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
-                                           (__v8si)_mm256_setzero_si256(), \
-                                           (__mmask8)(U), (int)(R))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtpd_epi32 (__m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A,
-                 (__v8si)
-                 _mm256_undefined_si256 (),
-                 (__mmask8) -1,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtpd_epi32 (__m256i __W, __mmask8 __U, __m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A,
-                 (__v8si) __W,
-                 (__mmask8) __U,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtpd_epi32 (__mmask8 __U, __m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvtpd2dq512_mask ((__v8df) __A,
-                 (__v8si)
-                 _mm256_setzero_si256 (),
-                 (__mmask8) __U,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvt_roundps_epu32(A, R) \
-  (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
-                                            (__v16si)_mm512_setzero_si512(), \
-                                            (__mmask16)-1, (int)(R))
-
-#define _mm512_mask_cvt_roundps_epu32(W, U, A, R) \
-  (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
-                                            (__v16si)(__m512i)(W), \
-                                            (__mmask16)(U), (int)(R))
-
-#define _mm512_maskz_cvt_roundps_epu32(U, A, R) \
-  (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
-                                            (__v16si)_mm512_setzero_si512(), \
-                                            (__mmask16)(U), (int)(R))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtps_epu32 (__m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A,
-                  (__v16si)
-                  _mm512_undefined_epi32 (),
-                  (__mmask16) -1,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtps_epu32 (__m512i __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A,
-                  (__v16si) __W,
-                  (__mmask16) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtps_epu32 (__mmask16 __U, __m512 __A)
-{
-  return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A,
-                  (__v16si)
-                  _mm512_setzero_si512 (),
-                  (__mmask16) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_cvt_roundpd_epu32(A, R) \
-  (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
-                                            (__v8si)_mm256_setzero_si256(), \
-                                            (__mmask8)-1, (int)(R))
-
-#define _mm512_mask_cvt_roundpd_epu32(W, U, A, R) \
-  (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
-                                            (__v8si)(__m256i)(W), \
-                                            (__mmask8)(U), (int)(R))
-
-#define _mm512_maskz_cvt_roundpd_epu32(U, A, R) \
-  (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
-                                            (__v8si)_mm256_setzero_si256(), \
-                                            (__mmask8)(U), (int)(R))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtpd_epu32 (__m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A,
-                  (__v8si)
-                  _mm256_undefined_si256 (),
-                  (__mmask8) -1,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtpd_epu32 (__m256i __W, __mmask8 __U, __m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A,
-                  (__v8si) __W,
-                  (__mmask8) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtpd_epu32 (__mmask8 __U, __m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvtpd2udq512_mask ((__v8df) __A,
-                  (__v8si)
-                  _mm256_setzero_si256 (),
-                  (__mmask8) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ double __DEFAULT_FN_ATTRS512
-_mm512_cvtsd_f64(__m512d __a)
-{
-  return __a[0];
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_cvtss_f32(__m512 __a)
-{
-  return __a[0];
-}
-
-/* Unpack and Interleave */
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_unpackhi_pd(__m512d __a, __m512d __b)
-{
-  return (__m512d)__builtin_shufflevector((__v8df)__a, (__v8df)__b,
-                                          1, 9, 1+2, 9+2, 1+4, 9+4, 1+6, 9+6);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_unpackhi_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
-                                           (__v8df)_mm512_unpackhi_pd(__A, __B),
-                                           (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpackhi_pd(__mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
-                                           (__v8df)_mm512_unpackhi_pd(__A, __B),
-                                           (__v8df)_mm512_setzero_pd());
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_unpacklo_pd(__m512d __a, __m512d __b)
-{
-  return (__m512d)__builtin_shufflevector((__v8df)__a, (__v8df)__b,
-                                          0, 8, 0+2, 8+2, 0+4, 8+4, 0+6, 8+6);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_unpacklo_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
-                                           (__v8df)_mm512_unpacklo_pd(__A, __B),
-                                           (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpacklo_pd (__mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
-                                           (__v8df)_mm512_unpacklo_pd(__A, __B),
-                                           (__v8df)_mm512_setzero_pd());
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_unpackhi_ps(__m512 __a, __m512 __b)
-{
-  return (__m512)__builtin_shufflevector((__v16sf)__a, (__v16sf)__b,
-                                         2,    18,    3,    19,
-                                         2+4,  18+4,  3+4,  19+4,
-                                         2+8,  18+8,  3+8,  19+8,
-                                         2+12, 18+12, 3+12, 19+12);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_unpackhi_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16) __U,
-                                          (__v16sf)_mm512_unpackhi_ps(__A, __B),
-                                          (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpackhi_ps (__mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16) __U,
-                                          (__v16sf)_mm512_unpackhi_ps(__A, __B),
-                                          (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_unpacklo_ps(__m512 __a, __m512 __b)
-{
-  return (__m512)__builtin_shufflevector((__v16sf)__a, (__v16sf)__b,
-                                         0,    16,    1,    17,
-                                         0+4,  16+4,  1+4,  17+4,
-                                         0+8,  16+8,  1+8,  17+8,
-                                         0+12, 16+12, 1+12, 17+12);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_unpacklo_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16) __U,
-                                          (__v16sf)_mm512_unpacklo_ps(__A, __B),
-                                          (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpacklo_ps (__mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16) __U,
-                                          (__v16sf)_mm512_unpacklo_ps(__A, __B),
-                                          (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_unpackhi_epi32(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_shufflevector((__v16si)__A, (__v16si)__B,
-                                          2,    18,    3,    19,
-                                          2+4,  18+4,  3+4,  19+4,
-                                          2+8,  18+8,  3+8,  19+8,
-                                          2+12, 18+12, 3+12, 19+12);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_unpackhi_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U,
-                                       (__v16si)_mm512_unpackhi_epi32(__A, __B),
-                                       (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpackhi_epi32(__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U,
-                                       (__v16si)_mm512_unpackhi_epi32(__A, __B),
-                                       (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_unpacklo_epi32(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_shufflevector((__v16si)__A, (__v16si)__B,
-                                          0,    16,    1,    17,
-                                          0+4,  16+4,  1+4,  17+4,
-                                          0+8,  16+8,  1+8,  17+8,
-                                          0+12, 16+12, 1+12, 17+12);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_unpacklo_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U,
-                                       (__v16si)_mm512_unpacklo_epi32(__A, __B),
-                                       (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpacklo_epi32(__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16) __U,
-                                       (__v16si)_mm512_unpacklo_epi32(__A, __B),
-                                       (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_unpackhi_epi64(__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_shufflevector((__v8di)__A, (__v8di)__B,
-                                          1, 9, 1+2, 9+2, 1+4, 9+4, 1+6, 9+6);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_unpackhi_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U,
-                                        (__v8di)_mm512_unpackhi_epi64(__A, __B),
-                                        (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpackhi_epi64(__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U,
-                                        (__v8di)_mm512_unpackhi_epi64(__A, __B),
-                                        (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_unpacklo_epi64 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_shufflevector((__v8di)__A, (__v8di)__B,
-                                          0, 8, 0+2, 8+2, 0+4, 8+4, 0+6, 8+6);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_unpacklo_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U,
-                                        (__v8di)_mm512_unpacklo_epi64(__A, __B),
-                                        (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_unpacklo_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8) __U,
-                                        (__v8di)_mm512_unpacklo_epi64(__A, __B),
-                                        (__v8di)_mm512_setzero_si512());
-}
-
-
-/* SIMD load ops */
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_loadu_si512 (void const *__P)
-{
-  struct __loadu_si512 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((struct __loadu_si512*)__P)->__v;
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_loadu_epi32 (void const *__P)
-{
-  struct __loadu_epi32 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((struct __loadu_epi32*)__P)->__v;
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_loadu_epi32 (__m512i __W, __mmask16 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_loaddqusi512_mask ((const int *) __P,
-                  (__v16si) __W,
-                  (__mmask16) __U);
-}
-
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_loadu_epi32(__mmask16 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_loaddqusi512_mask ((const int *)__P,
-                                                     (__v16si)
-                                                     _mm512_setzero_si512 (),
-                                                     (__mmask16) __U);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_loadu_epi64 (void const *__P)
-{
-  struct __loadu_epi64 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((struct __loadu_epi64*)__P)->__v;
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_loadu_epi64 (__m512i __W, __mmask8 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_loaddqudi512_mask ((const long long *) __P,
-                  (__v8di) __W,
-                  (__mmask8) __U);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_loadu_epi64(__mmask8 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_loaddqudi512_mask ((const long long *)__P,
-                                                     (__v8di)
-                                                     _mm512_setzero_si512 (),
-                                                     (__mmask8) __U);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_loadu_ps (__m512 __W, __mmask16 __U, void const *__P)
-{
-  return (__m512) __builtin_ia32_loadups512_mask ((const float *) __P,
-                   (__v16sf) __W,
-                   (__mmask16) __U);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_loadu_ps(__mmask16 __U, void const *__P)
-{
-  return (__m512) __builtin_ia32_loadups512_mask ((const float *)__P,
-                                                  (__v16sf)
-                                                  _mm512_setzero_ps (),
-                                                  (__mmask16) __U);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_loadu_pd (__m512d __W, __mmask8 __U, void const *__P)
-{
-  return (__m512d) __builtin_ia32_loadupd512_mask ((const double *) __P,
-                (__v8df) __W,
-                (__mmask8) __U);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_loadu_pd(__mmask8 __U, void const *__P)
-{
-  return (__m512d) __builtin_ia32_loadupd512_mask ((const double *)__P,
-                                                   (__v8df)
-                                                   _mm512_setzero_pd (),
-                                                   (__mmask8) __U);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_loadu_pd(void const *__p)
-{
-  struct __loadu_pd {
-    __m512d_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((struct __loadu_pd*)__p)->__v;
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_loadu_ps(void const *__p)
-{
-  struct __loadu_ps {
-    __m512_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((struct __loadu_ps*)__p)->__v;
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_load_ps(void const *__p)
-{
-  return *(__m512*)__p;
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_load_ps (__m512 __W, __mmask16 __U, void const *__P)
-{
-  return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *) __P,
-                   (__v16sf) __W,
-                   (__mmask16) __U);
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_load_ps(__mmask16 __U, void const *__P)
-{
-  return (__m512) __builtin_ia32_loadaps512_mask ((const __v16sf *)__P,
-                                                  (__v16sf)
-                                                  _mm512_setzero_ps (),
-                                                  (__mmask16) __U);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_load_pd(void const *__p)
-{
-  return *(__m512d*)__p;
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_load_pd (__m512d __W, __mmask8 __U, void const *__P)
-{
-  return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *) __P,
-                          (__v8df) __W,
-                          (__mmask8) __U);
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_load_pd(__mmask8 __U, void const *__P)
-{
-  return (__m512d) __builtin_ia32_loadapd512_mask ((const __v8df *)__P,
-                                                   (__v8df)
-                                                   _mm512_setzero_pd (),
-                                                   (__mmask8) __U);
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_load_si512 (void const *__P)
-{
-  return *(__m512i *) __P;
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_load_epi32 (void const *__P)
-{
-  return *(__m512i *) __P;
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_load_epi64 (void const *__P)
-{
-  return *(__m512i *) __P;
-}
-
-/* SIMD store ops */
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_storeu_epi64 (void *__P, __m512i __A)
-{
-  struct __storeu_epi64 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_epi64*)__P)->__v = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_mask_storeu_epi64(void *__P, __mmask8 __U, __m512i __A)
-{
-  __builtin_ia32_storedqudi512_mask ((long long *)__P, (__v8di) __A,
-                                     (__mmask8) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_storeu_si512 (void *__P, __m512i __A)
-{
-  struct __storeu_si512 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_si512*)__P)->__v = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_storeu_epi32 (void *__P, __m512i __A)
-{
-  struct __storeu_epi32 {
-    __m512i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_epi32*)__P)->__v = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_mask_storeu_epi32(void *__P, __mmask16 __U, __m512i __A)
-{
-  __builtin_ia32_storedqusi512_mask ((int *)__P, (__v16si) __A,
-                                     (__mmask16) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_mask_storeu_pd(void *__P, __mmask8 __U, __m512d __A)
-{
-  __builtin_ia32_storeupd512_mask ((double *)__P, (__v8df) __A, (__mmask8) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_storeu_pd(void *__P, __m512d __A)
-{
-  struct __storeu_pd {
-    __m512d_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_pd*)__P)->__v = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_mask_storeu_ps(void *__P, __mmask16 __U, __m512 __A)
-{
-  __builtin_ia32_storeups512_mask ((float *)__P, (__v16sf) __A,
-                                   (__mmask16) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_storeu_ps(void *__P, __m512 __A)
-{
-  struct __storeu_ps {
-    __m512_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_ps*)__P)->__v = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_mask_store_pd(void *__P, __mmask8 __U, __m512d __A)
-{
-  __builtin_ia32_storeapd512_mask ((__v8df *)__P, (__v8df) __A, (__mmask8) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_store_pd(void *__P, __m512d __A)
-{
-  *(__m512d*)__P = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_mask_store_ps(void *__P, __mmask16 __U, __m512 __A)
-{
-  __builtin_ia32_storeaps512_mask ((__v16sf *)__P, (__v16sf) __A,
-                                   (__mmask16) __U);
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_store_ps(void *__P, __m512 __A)
-{
-  *(__m512*)__P = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_store_si512 (void *__P, __m512i __A)
-{
-  *(__m512i *) __P = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_store_epi32 (void *__P, __m512i __A)
-{
-  *(__m512i *) __P = __A;
-}
-
-static __inline void __DEFAULT_FN_ATTRS512
-_mm512_store_epi64 (void *__P, __m512i __A)
-{
-  *(__m512i *) __P = __A;
-}
-
-/* Mask ops */
-
-static __inline __mmask16 __DEFAULT_FN_ATTRS
-_mm512_knot(__mmask16 __M)
-{
-  return __builtin_ia32_knothi(__M);
-}
-
-/* Integer compare */
-
-#define _mm512_cmpeq_epi32_mask(A, B) \
-    _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm512_mask_cmpeq_epi32_mask(k, A, B) \
-    _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm512_cmpge_epi32_mask(A, B) \
-    _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_GE)
-#define _mm512_mask_cmpge_epi32_mask(k, A, B) \
-    _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm512_cmpgt_epi32_mask(A, B) \
-    _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_GT)
-#define _mm512_mask_cmpgt_epi32_mask(k, A, B) \
-    _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm512_cmple_epi32_mask(A, B) \
-    _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_LE)
-#define _mm512_mask_cmple_epi32_mask(k, A, B) \
-    _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm512_cmplt_epi32_mask(A, B) \
-    _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_LT)
-#define _mm512_mask_cmplt_epi32_mask(k, A, B) \
-    _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm512_cmpneq_epi32_mask(A, B) \
-    _mm512_cmp_epi32_mask((A), (B), _MM_CMPINT_NE)
-#define _mm512_mask_cmpneq_epi32_mask(k, A, B) \
-    _mm512_mask_cmp_epi32_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm512_cmpeq_epu32_mask(A, B) \
-    _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm512_mask_cmpeq_epu32_mask(k, A, B) \
-    _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm512_cmpge_epu32_mask(A, B) \
-    _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_GE)
-#define _mm512_mask_cmpge_epu32_mask(k, A, B) \
-    _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm512_cmpgt_epu32_mask(A, B) \
-    _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_GT)
-#define _mm512_mask_cmpgt_epu32_mask(k, A, B) \
-    _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm512_cmple_epu32_mask(A, B) \
-    _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_LE)
-#define _mm512_mask_cmple_epu32_mask(k, A, B) \
-    _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm512_cmplt_epu32_mask(A, B) \
-    _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_LT)
-#define _mm512_mask_cmplt_epu32_mask(k, A, B) \
-    _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm512_cmpneq_epu32_mask(A, B) \
-    _mm512_cmp_epu32_mask((A), (B), _MM_CMPINT_NE)
-#define _mm512_mask_cmpneq_epu32_mask(k, A, B) \
-    _mm512_mask_cmp_epu32_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm512_cmpeq_epi64_mask(A, B) \
-    _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm512_mask_cmpeq_epi64_mask(k, A, B) \
-    _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm512_cmpge_epi64_mask(A, B) \
-    _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_GE)
-#define _mm512_mask_cmpge_epi64_mask(k, A, B) \
-    _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm512_cmpgt_epi64_mask(A, B) \
-    _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_GT)
-#define _mm512_mask_cmpgt_epi64_mask(k, A, B) \
-    _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm512_cmple_epi64_mask(A, B) \
-    _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_LE)
-#define _mm512_mask_cmple_epi64_mask(k, A, B) \
-    _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm512_cmplt_epi64_mask(A, B) \
-    _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_LT)
-#define _mm512_mask_cmplt_epi64_mask(k, A, B) \
-    _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm512_cmpneq_epi64_mask(A, B) \
-    _mm512_cmp_epi64_mask((A), (B), _MM_CMPINT_NE)
-#define _mm512_mask_cmpneq_epi64_mask(k, A, B) \
-    _mm512_mask_cmp_epi64_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm512_cmpeq_epu64_mask(A, B) \
-    _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm512_mask_cmpeq_epu64_mask(k, A, B) \
-    _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm512_cmpge_epu64_mask(A, B) \
-    _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_GE)
-#define _mm512_mask_cmpge_epu64_mask(k, A, B) \
-    _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm512_cmpgt_epu64_mask(A, B) \
-    _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_GT)
-#define _mm512_mask_cmpgt_epu64_mask(k, A, B) \
-    _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm512_cmple_epu64_mask(A, B) \
-    _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_LE)
-#define _mm512_mask_cmple_epu64_mask(k, A, B) \
-    _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm512_cmplt_epu64_mask(A, B) \
-    _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_LT)
-#define _mm512_mask_cmplt_epu64_mask(k, A, B) \
-    _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm512_cmpneq_epu64_mask(A, B) \
-    _mm512_cmp_epu64_mask((A), (B), _MM_CMPINT_NE)
-#define _mm512_mask_cmpneq_epu64_mask(k, A, B) \
-    _mm512_mask_cmp_epu64_mask((k), (A), (B), _MM_CMPINT_NE)
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi8_epi32(__m128i __A)
-{
-  /* This function always performs a signed extension, but __v16qi is a char
-     which may be signed or unsigned, so use __v16qs. */
-  return (__m512i)__builtin_convertvector((__v16qs)__A, __v16si);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi8_epi32(__m512i __W, __mmask16 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                             (__v16si)_mm512_cvtepi8_epi32(__A),
-                                             (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi8_epi32(__mmask16 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                             (__v16si)_mm512_cvtepi8_epi32(__A),
-                                             (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi8_epi64(__m128i __A)
-{
-  /* This function always performs a signed extension, but __v16qi is a char
-     which may be signed or unsigned, so use __v16qs. */
-  return (__m512i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__A, (__v16qs)__A, 0, 1, 2, 3, 4, 5, 6, 7), __v8di);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi8_epi64(__m512i __W, __mmask8 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepi8_epi64(__A),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepi8_epi64(__A),
-                                             (__v8di)_mm512_setzero_si512 ());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi32_epi64(__m256i __X)
-{
-  return (__m512i)__builtin_convertvector((__v8si)__X, __v8di);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32_epi64(__m512i __W, __mmask8 __U, __m256i __X)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepi32_epi64(__X),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi32_epi64(__mmask8 __U, __m256i __X)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepi32_epi64(__X),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi16_epi32(__m256i __A)
-{
-  return (__m512i)__builtin_convertvector((__v16hi)__A, __v16si);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi16_epi32(__m512i __W, __mmask16 __U, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                            (__v16si)_mm512_cvtepi16_epi32(__A),
-                                            (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi16_epi32(__mmask16 __U, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                            (__v16si)_mm512_cvtepi16_epi32(__A),
-                                            (__v16si)_mm512_setzero_si512 ());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi16_epi64(__m128i __A)
-{
-  return (__m512i)__builtin_convertvector((__v8hi)__A, __v8di);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi16_epi64(__m512i __W, __mmask8 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepi16_epi64(__A),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepi16_epi64(__A),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepu8_epi32(__m128i __A)
-{
-  return (__m512i)__builtin_convertvector((__v16qu)__A, __v16si);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu8_epi32(__m512i __W, __mmask16 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                             (__v16si)_mm512_cvtepu8_epi32(__A),
-                                             (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu8_epi32(__mmask16 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                             (__v16si)_mm512_cvtepu8_epi32(__A),
-                                             (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepu8_epi64(__m128i __A)
-{
-  return (__m512i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__A, (__v16qu)__A, 0, 1, 2, 3, 4, 5, 6, 7), __v8di);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu8_epi64(__m512i __W, __mmask8 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepu8_epi64(__A),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu8_epi64(__mmask8 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepu8_epi64(__A),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepu32_epi64(__m256i __X)
-{
-  return (__m512i)__builtin_convertvector((__v8su)__X, __v8di);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu32_epi64(__m512i __W, __mmask8 __U, __m256i __X)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepu32_epi64(__X),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu32_epi64(__mmask8 __U, __m256i __X)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepu32_epi64(__X),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepu16_epi32(__m256i __A)
-{
-  return (__m512i)__builtin_convertvector((__v16hu)__A, __v16si);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu16_epi32(__m512i __W, __mmask16 __U, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                            (__v16si)_mm512_cvtepu16_epi32(__A),
-                                            (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu16_epi32(__mmask16 __U, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                            (__v16si)_mm512_cvtepu16_epi32(__A),
-                                            (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_cvtepu16_epi64(__m128i __A)
-{
-  return (__m512i)__builtin_convertvector((__v8hu)__A, __v8di);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepu16_epi64(__m512i __W, __mmask8 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepu16_epi64(__A),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_cvtepu16_epi64(__A),
-                                             (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_rorv_epi32 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_prorvd512((__v16si)__A, (__v16si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_rorv_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                                           (__v16si)_mm512_rorv_epi32(__A, __B),
-                                           (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_rorv_epi32 (__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                                           (__v16si)_mm512_rorv_epi32(__A, __B),
-                                           (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_rorv_epi64 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_prorvq512((__v8di)__A, (__v8di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_rorv_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__U,
-                                            (__v8di)_mm512_rorv_epi64(__A, __B),
-                                            (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_rorv_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__U,
-                                            (__v8di)_mm512_rorv_epi64(__A, __B),
-                                            (__v8di)_mm512_setzero_si512());
-}
-
-
-
-#define _mm512_cmp_epi32_mask(a, b, p) \
-  (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \
-                                         (__v16si)(__m512i)(b), (int)(p), \
-                                         (__mmask16)-1)
-
-#define _mm512_cmp_epu32_mask(a, b, p) \
-  (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \
-                                          (__v16si)(__m512i)(b), (int)(p), \
-                                          (__mmask16)-1)
-
-#define _mm512_cmp_epi64_mask(a, b, p) \
-  (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \
-                                        (__v8di)(__m512i)(b), (int)(p), \
-                                        (__mmask8)-1)
-
-#define _mm512_cmp_epu64_mask(a, b, p) \
-  (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \
-                                         (__v8di)(__m512i)(b), (int)(p), \
-                                         (__mmask8)-1)
-
-#define _mm512_mask_cmp_epi32_mask(m, a, b, p) \
-  (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \
-                                         (__v16si)(__m512i)(b), (int)(p), \
-                                         (__mmask16)(m))
-
-#define _mm512_mask_cmp_epu32_mask(m, a, b, p) \
-  (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \
-                                          (__v16si)(__m512i)(b), (int)(p), \
-                                          (__mmask16)(m))
-
-#define _mm512_mask_cmp_epi64_mask(m, a, b, p) \
-  (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \
-                                        (__v8di)(__m512i)(b), (int)(p), \
-                                        (__mmask8)(m))
-
-#define _mm512_mask_cmp_epu64_mask(m, a, b, p) \
-  (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \
-                                         (__v8di)(__m512i)(b), (int)(p), \
-                                         (__mmask8)(m))
-
-#define _mm512_rol_epi32(a, b) \
-  (__m512i)__builtin_ia32_prold512((__v16si)(__m512i)(a), (int)(b))
-
-#define _mm512_mask_rol_epi32(W, U, a, b) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                      (__v16si)_mm512_rol_epi32((a), (b)), \
-                                      (__v16si)(__m512i)(W))
-
-#define _mm512_maskz_rol_epi32(U, a, b) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                      (__v16si)_mm512_rol_epi32((a), (b)), \
-                                      (__v16si)_mm512_setzero_si512())
-
-#define _mm512_rol_epi64(a, b) \
-  (__m512i)__builtin_ia32_prolq512((__v8di)(__m512i)(a), (int)(b))
-
-#define _mm512_mask_rol_epi64(W, U, a, b) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                      (__v8di)_mm512_rol_epi64((a), (b)), \
-                                      (__v8di)(__m512i)(W))
-
-#define _mm512_maskz_rol_epi64(U, a, b) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                      (__v8di)_mm512_rol_epi64((a), (b)), \
-                                      (__v8di)_mm512_setzero_si512())
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_rolv_epi32 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_prolvd512((__v16si)__A, (__v16si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_rolv_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                                           (__v16si)_mm512_rolv_epi32(__A, __B),
-                                           (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_rolv_epi32 (__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512(__U,
-                                           (__v16si)_mm512_rolv_epi32(__A, __B),
-                                           (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_rolv_epi64 (__m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_prolvq512((__v8di)__A, (__v8di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_rolv_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__U,
-                                            (__v8di)_mm512_rolv_epi64(__A, __B),
-                                            (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_rolv_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512(__U,
-                                            (__v8di)_mm512_rolv_epi64(__A, __B),
-                                            (__v8di)_mm512_setzero_si512());
-}
-
-#define _mm512_ror_epi32(A, B) \
-  (__m512i)__builtin_ia32_prord512((__v16si)(__m512i)(A), (int)(B))
-
-#define _mm512_mask_ror_epi32(W, U, A, B) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                      (__v16si)_mm512_ror_epi32((A), (B)), \
-                                      (__v16si)(__m512i)(W))
-
-#define _mm512_maskz_ror_epi32(U, A, B) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                      (__v16si)_mm512_ror_epi32((A), (B)), \
-                                      (__v16si)_mm512_setzero_si512())
-
-#define _mm512_ror_epi64(A, B) \
-  (__m512i)__builtin_ia32_prorq512((__v8di)(__m512i)(A), (int)(B))
-
-#define _mm512_mask_ror_epi64(W, U, A, B) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                      (__v8di)_mm512_ror_epi64((A), (B)), \
-                                      (__v8di)(__m512i)(W))
-
-#define _mm512_maskz_ror_epi64(U, A, B) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                      (__v8di)_mm512_ror_epi64((A), (B)), \
-                                      (__v8di)_mm512_setzero_si512())
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_slli_epi32(__m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_pslldi512((__v16si)__A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_slli_epi32(__m512i __W, __mmask16 __U, __m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                         (__v16si)_mm512_slli_epi32(__A, __B),
-                                         (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_slli_epi32(__mmask16 __U, __m512i __A, int __B) {
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                         (__v16si)_mm512_slli_epi32(__A, __B),
-                                         (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_slli_epi64(__m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_psllqi512((__v8di)__A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_slli_epi64(__m512i __W, __mmask8 __U, __m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                          (__v8di)_mm512_slli_epi64(__A, __B),
-                                          (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_slli_epi64(__mmask8 __U, __m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                          (__v8di)_mm512_slli_epi64(__A, __B),
-                                          (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srli_epi32(__m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_psrldi512((__v16si)__A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srli_epi32(__m512i __W, __mmask16 __U, __m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                         (__v16si)_mm512_srli_epi32(__A, __B),
-                                         (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srli_epi32(__mmask16 __U, __m512i __A, int __B) {
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                         (__v16si)_mm512_srli_epi32(__A, __B),
-                                         (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srli_epi64(__m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_psrlqi512((__v8di)__A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srli_epi64(__m512i __W, __mmask8 __U, __m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                          (__v8di)_mm512_srli_epi64(__A, __B),
-                                          (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srli_epi64(__mmask8 __U, __m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                          (__v8di)_mm512_srli_epi64(__A, __B),
-                                          (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_load_epi32 (__m512i __W, __mmask16 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_movdqa32load512_mask ((const __v16si *) __P,
-              (__v16si) __W,
-              (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_load_epi32 (__mmask16 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_movdqa32load512_mask ((const __v16si *) __P,
-              (__v16si)
-              _mm512_setzero_si512 (),
-              (__mmask16) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_store_epi32 (void *__P, __mmask16 __U, __m512i __A)
-{
-  __builtin_ia32_movdqa32store512_mask ((__v16si *) __P, (__v16si) __A,
-          (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mov_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U,
-                 (__v16si) __A,
-                 (__v16si) __W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mov_epi32 (__mmask16 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U,
-                 (__v16si) __A,
-                 (__v16si) _mm512_setzero_si512 ());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mov_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U,
-                 (__v8di) __A,
-                 (__v8di) __W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mov_epi64 (__mmask8 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U,
-                 (__v8di) __A,
-                 (__v8di) _mm512_setzero_si512 ());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_load_epi64 (__m512i __W, __mmask8 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_movdqa64load512_mask ((const __v8di *) __P,
-              (__v8di) __W,
-              (__mmask8) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_load_epi64 (__mmask8 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_movdqa64load512_mask ((const __v8di *) __P,
-              (__v8di)
-              _mm512_setzero_si512 (),
-              (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_store_epi64 (void *__P, __mmask8 __U, __m512i __A)
-{
-  __builtin_ia32_movdqa64store512_mask ((__v8di *) __P, (__v8di) __A,
-          (__mmask8) __U);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_movedup_pd (__m512d __A)
-{
-  return (__m512d)__builtin_shufflevector((__v8df)__A, (__v8df)__A,
-                                          0, 0, 2, 2, 4, 4, 6, 6);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_movedup_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_movedup_pd(__A),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_movedup_pd (__mmask8 __U, __m512d __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_movedup_pd(__A),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-#define _mm512_fixupimm_round_pd(A, B, C, imm, R) \
-  (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
-                                             (__v8df)(__m512d)(B), \
-                                             (__v8di)(__m512i)(C), (int)(imm), \
-                                             (__mmask8)-1, (int)(R))
-
-#define _mm512_mask_fixupimm_round_pd(A, U, B, C, imm, R) \
-  (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
-                                             (__v8df)(__m512d)(B), \
-                                             (__v8di)(__m512i)(C), (int)(imm), \
-                                             (__mmask8)(U), (int)(R))
-
-#define _mm512_fixupimm_pd(A, B, C, imm) \
-  (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
-                                             (__v8df)(__m512d)(B), \
-                                             (__v8di)(__m512i)(C), (int)(imm), \
-                                             (__mmask8)-1, \
-                                             _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_mask_fixupimm_pd(A, U, B, C, imm) \
-  (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
-                                             (__v8df)(__m512d)(B), \
-                                             (__v8di)(__m512i)(C), (int)(imm), \
-                                             (__mmask8)(U), \
-                                             _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_maskz_fixupimm_round_pd(U, A, B, C, imm, R) \
-  (__m512d)__builtin_ia32_fixupimmpd512_maskz((__v8df)(__m512d)(A), \
-                                              (__v8df)(__m512d)(B), \
-                                              (__v8di)(__m512i)(C), \
-                                              (int)(imm), (__mmask8)(U), \
-                                              (int)(R))
-
-#define _mm512_maskz_fixupimm_pd(U, A, B, C, imm) \
-  (__m512d)__builtin_ia32_fixupimmpd512_maskz((__v8df)(__m512d)(A), \
-                                              (__v8df)(__m512d)(B), \
-                                              (__v8di)(__m512i)(C), \
-                                              (int)(imm), (__mmask8)(U), \
-                                              _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_fixupimm_round_ps(A, B, C, imm, R) \
-  (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
-                                            (__v16sf)(__m512)(B), \
-                                            (__v16si)(__m512i)(C), (int)(imm), \
-                                            (__mmask16)-1, (int)(R))
-
-#define _mm512_mask_fixupimm_round_ps(A, U, B, C, imm, R) \
-  (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
-                                            (__v16sf)(__m512)(B), \
-                                            (__v16si)(__m512i)(C), (int)(imm), \
-                                            (__mmask16)(U), (int)(R))
-
-#define _mm512_fixupimm_ps(A, B, C, imm) \
-  (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
-                                            (__v16sf)(__m512)(B), \
-                                            (__v16si)(__m512i)(C), (int)(imm), \
-                                            (__mmask16)-1, \
-                                            _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_mask_fixupimm_ps(A, U, B, C, imm) \
-  (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
-                                            (__v16sf)(__m512)(B), \
-                                            (__v16si)(__m512i)(C), (int)(imm), \
-                                            (__mmask16)(U), \
-                                            _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_maskz_fixupimm_round_ps(U, A, B, C, imm, R) \
-  (__m512)__builtin_ia32_fixupimmps512_maskz((__v16sf)(__m512)(A), \
-                                             (__v16sf)(__m512)(B), \
-                                             (__v16si)(__m512i)(C), \
-                                             (int)(imm), (__mmask16)(U), \
-                                             (int)(R))
-
-#define _mm512_maskz_fixupimm_ps(U, A, B, C, imm) \
-  (__m512)__builtin_ia32_fixupimmps512_maskz((__v16sf)(__m512)(A), \
-                                             (__v16sf)(__m512)(B), \
-                                             (__v16si)(__m512i)(C), \
-                                             (int)(imm), (__mmask16)(U), \
-                                             _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_fixupimm_round_sd(A, B, C, imm, R) \
-  (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
-                                          (__v2df)(__m128d)(B), \
-                                          (__v2di)(__m128i)(C), (int)(imm), \
-                                          (__mmask8)-1, (int)(R))
-
-#define _mm_mask_fixupimm_round_sd(A, U, B, C, imm, R) \
-  (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
-                                          (__v2df)(__m128d)(B), \
-                                          (__v2di)(__m128i)(C), (int)(imm), \
-                                          (__mmask8)(U), (int)(R))
-
-#define _mm_fixupimm_sd(A, B, C, imm) \
-  (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
-                                          (__v2df)(__m128d)(B), \
-                                          (__v2di)(__m128i)(C), (int)(imm), \
-                                          (__mmask8)-1, \
-                                          _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_fixupimm_sd(A, U, B, C, imm) \
-  (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
-                                          (__v2df)(__m128d)(B), \
-                                          (__v2di)(__m128i)(C), (int)(imm), \
-                                          (__mmask8)(U), \
-                                          _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_maskz_fixupimm_round_sd(U, A, B, C, imm, R) \
-  (__m128d)__builtin_ia32_fixupimmsd_maskz((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2di)(__m128i)(C), (int)(imm), \
-                                           (__mmask8)(U), (int)(R))
-
-#define _mm_maskz_fixupimm_sd(U, A, B, C, imm) \
-  (__m128d)__builtin_ia32_fixupimmsd_maskz((__v2df)(__m128d)(A), \
-                                           (__v2df)(__m128d)(B), \
-                                           (__v2di)(__m128i)(C), (int)(imm), \
-                                           (__mmask8)(U), \
-                                           _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_fixupimm_round_ss(A, B, C, imm, R) \
-  (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
-                                         (__v4sf)(__m128)(B), \
-                                         (__v4si)(__m128i)(C), (int)(imm), \
-                                         (__mmask8)-1, (int)(R))
-
-#define _mm_mask_fixupimm_round_ss(A, U, B, C, imm, R) \
-  (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
-                                         (__v4sf)(__m128)(B), \
-                                         (__v4si)(__m128i)(C), (int)(imm), \
-                                         (__mmask8)(U), (int)(R))
-
-#define _mm_fixupimm_ss(A, B, C, imm) \
-  (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
-                                         (__v4sf)(__m128)(B), \
-                                         (__v4si)(__m128i)(C), (int)(imm), \
-                                         (__mmask8)-1, \
-                                         _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_fixupimm_ss(A, U, B, C, imm) \
-  (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
-                                         (__v4sf)(__m128)(B), \
-                                         (__v4si)(__m128i)(C), (int)(imm), \
-                                         (__mmask8)(U), \
-                                         _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_maskz_fixupimm_round_ss(U, A, B, C, imm, R) \
-  (__m128)__builtin_ia32_fixupimmss_maskz((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4si)(__m128i)(C), (int)(imm), \
-                                          (__mmask8)(U), (int)(R))
-
-#define _mm_maskz_fixupimm_ss(U, A, B, C, imm) \
-  (__m128)__builtin_ia32_fixupimmss_maskz((__v4sf)(__m128)(A), \
-                                          (__v4sf)(__m128)(B), \
-                                          (__v4si)(__m128i)(C), (int)(imm), \
-                                          (__mmask8)(U), \
-                                          _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_getexp_round_sd(A, B, R) \
-  (__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
-                                                 (__v2df)(__m128d)(B), \
-                                                 (__v2df)_mm_setzero_pd(), \
-                                                 (__mmask8)-1, (int)(R))
-
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_getexp_sd (__m128d __A, __m128d __B)
-{
-  return (__m128d) __builtin_ia32_getexpsd128_round_mask ((__v2df) __A,
-                 (__v2df) __B, (__v2df) _mm_setzero_pd(), (__mmask8) -1, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_getexp_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_getexpsd128_round_mask ( (__v2df) __A,
-          (__v2df) __B,
-          (__v2df) __W,
-          (__mmask8) __U,
-          _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask_getexp_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
-                                                 (__v2df)(__m128d)(B), \
-                                                 (__v2df)(__m128d)(W), \
-                                                 (__mmask8)(U), (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_getexp_sd (__mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_getexpsd128_round_mask ( (__v2df) __A,
-          (__v2df) __B,
-          (__v2df) _mm_setzero_pd (),
-          (__mmask8) __U,
-          _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_getexp_round_sd(U, A, B, R) \
-  (__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
-                                                 (__v2df)(__m128d)(B), \
-                                                 (__v2df)_mm_setzero_pd(), \
-                                                 (__mmask8)(U), (int)(R))
-
-#define _mm_getexp_round_ss(A, B, R) \
-  (__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
-                                                (__v4sf)(__m128)(B), \
-                                                (__v4sf)_mm_setzero_ps(), \
-                                                (__mmask8)-1, (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_getexp_ss (__m128 __A, __m128 __B)
-{
-  return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A,
-                (__v4sf) __B, (__v4sf)  _mm_setzero_ps(), (__mmask8) -1, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_getexp_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A,
-          (__v4sf) __B,
-          (__v4sf) __W,
-          (__mmask8) __U,
-          _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask_getexp_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
-                                                (__v4sf)(__m128)(B), \
-                                                (__v4sf)(__m128)(W), \
-                                                (__mmask8)(U), (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_getexp_ss (__mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_getexpss128_round_mask ((__v4sf) __A,
-          (__v4sf) __B,
-          (__v4sf) _mm_setzero_ps (),
-          (__mmask8) __U,
-          _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_getexp_round_ss(U, A, B, R) \
-  (__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
-                                                (__v4sf)(__m128)(B), \
-                                                (__v4sf)_mm_setzero_ps(), \
-                                                (__mmask8)(U), (int)(R))
-
-#define _mm_getmant_round_sd(A, B, C, D, R) \
-  (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
-                                               (__v2df)(__m128d)(B), \
-                                               (int)(((D)<<2) | (C)), \
-                                               (__v2df)_mm_setzero_pd(), \
-                                               (__mmask8)-1, (int)(R))
-
-#define _mm_getmant_sd(A, B, C, D)  \
-  (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
-                                               (__v2df)(__m128d)(B), \
-                                               (int)(((D)<<2) | (C)), \
-                                               (__v2df)_mm_setzero_pd(), \
-                                               (__mmask8)-1, \
-                                               _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_getmant_sd(W, U, A, B, C, D) \
-  (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
-                                               (__v2df)(__m128d)(B), \
-                                               (int)(((D)<<2) | (C)), \
-                                               (__v2df)(__m128d)(W), \
-                                               (__mmask8)(U), \
-                                               _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_getmant_round_sd(W, U, A, B, C, D, R) \
-  (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
-                                               (__v2df)(__m128d)(B), \
-                                               (int)(((D)<<2) | (C)), \
-                                               (__v2df)(__m128d)(W), \
-                                               (__mmask8)(U), (int)(R))
-
-#define _mm_maskz_getmant_sd(U, A, B, C, D) \
-  (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
-                                               (__v2df)(__m128d)(B), \
-                                               (int)(((D)<<2) | (C)), \
-                                               (__v2df)_mm_setzero_pd(), \
-                                               (__mmask8)(U), \
-                                               _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_maskz_getmant_round_sd(U, A, B, C, D, R) \
-  (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
-                                               (__v2df)(__m128d)(B), \
-                                               (int)(((D)<<2) | (C)), \
-                                               (__v2df)_mm_setzero_pd(), \
-                                               (__mmask8)(U), (int)(R))
-
-#define _mm_getmant_round_ss(A, B, C, D, R) \
-  (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (int)(((D)<<2) | (C)), \
-                                              (__v4sf)_mm_setzero_ps(), \
-                                              (__mmask8)-1, (int)(R))
-
-#define _mm_getmant_ss(A, B, C, D) \
-  (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (int)(((D)<<2) | (C)), \
-                                              (__v4sf)_mm_setzero_ps(), \
-                                              (__mmask8)-1, \
-                                              _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_getmant_ss(W, U, A, B, C, D) \
-  (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (int)(((D)<<2) | (C)), \
-                                              (__v4sf)(__m128)(W), \
-                                              (__mmask8)(U), \
-                                              _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_getmant_round_ss(W, U, A, B, C, D, R) \
-  (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (int)(((D)<<2) | (C)), \
-                                              (__v4sf)(__m128)(W), \
-                                              (__mmask8)(U), (int)(R))
-
-#define _mm_maskz_getmant_ss(U, A, B, C, D) \
-  (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (int)(((D)<<2) | (C)), \
-                                              (__v4sf)_mm_setzero_ps(), \
-                                              (__mmask8)(U), \
-                                              _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_maskz_getmant_round_ss(U, A, B, C, D, R) \
-  (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (int)(((D)<<2) | (C)), \
-                                              (__v4sf)_mm_setzero_ps(), \
-                                              (__mmask8)(U), (int)(R))
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_kmov (__mmask16 __A)
-{
-  return  __A;
-}
-
-#define _mm_comi_round_sd(A, B, P, R) \
-  (int)__builtin_ia32_vcomisd((__v2df)(__m128d)(A), (__v2df)(__m128d)(B), \
-                              (int)(P), (int)(R))
-
-#define _mm_comi_round_ss(A, B, P, R) \
-  (int)__builtin_ia32_vcomiss((__v4sf)(__m128)(A), (__v4sf)(__m128)(B), \
-                              (int)(P), (int)(R))
-
-#ifdef __x86_64__
-#define _mm_cvt_roundsd_si64(A, R) \
-  (long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R))
-#endif
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sll_epi32(__m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_pslld512((__v16si) __A, (__v4si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sll_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                          (__v16si)_mm512_sll_epi32(__A, __B),
-                                          (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sll_epi32(__mmask16 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                          (__v16si)_mm512_sll_epi32(__A, __B),
-                                          (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sll_epi64(__m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_psllq512((__v8di)__A, (__v2di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sll_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                             (__v8di)_mm512_sll_epi64(__A, __B),
-                                             (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sll_epi64(__mmask8 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                           (__v8di)_mm512_sll_epi64(__A, __B),
-                                           (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sllv_epi32(__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_psllv16si((__v16si)__X, (__v16si)__Y);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sllv_epi32(__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                           (__v16si)_mm512_sllv_epi32(__X, __Y),
-                                           (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sllv_epi32(__mmask16 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                           (__v16si)_mm512_sllv_epi32(__X, __Y),
-                                           (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sllv_epi64(__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_psllv8di((__v8di)__X, (__v8di)__Y);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sllv_epi64(__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                            (__v8di)_mm512_sllv_epi64(__X, __Y),
-                                            (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sllv_epi64(__mmask8 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                            (__v8di)_mm512_sllv_epi64(__X, __Y),
-                                            (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sra_epi32(__m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_psrad512((__v16si) __A, (__v4si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sra_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                          (__v16si)_mm512_sra_epi32(__A, __B),
-                                          (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sra_epi32(__mmask16 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                          (__v16si)_mm512_sra_epi32(__A, __B),
-                                          (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_sra_epi64(__m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_psraq512((__v8di)__A, (__v2di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_sra_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                           (__v8di)_mm512_sra_epi64(__A, __B),
-                                           (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_sra_epi64(__mmask8 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                           (__v8di)_mm512_sra_epi64(__A, __B),
-                                           (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srav_epi32(__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_psrav16si((__v16si)__X, (__v16si)__Y);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srav_epi32(__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                           (__v16si)_mm512_srav_epi32(__X, __Y),
-                                           (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srav_epi32(__mmask16 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                           (__v16si)_mm512_srav_epi32(__X, __Y),
-                                           (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srav_epi64(__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_psrav8di((__v8di)__X, (__v8di)__Y);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srav_epi64(__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                            (__v8di)_mm512_srav_epi64(__X, __Y),
-                                            (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srav_epi64(__mmask8 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                            (__v8di)_mm512_srav_epi64(__X, __Y),
-                                            (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srl_epi32(__m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_psrld512((__v16si) __A, (__v4si)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srl_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                          (__v16si)_mm512_srl_epi32(__A, __B),
-                                          (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srl_epi32(__mmask16 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                          (__v16si)_mm512_srl_epi32(__A, __B),
-                                          (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srl_epi64(__m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_psrlq512((__v8di)__A, (__v2di)__B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srl_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                           (__v8di)_mm512_srl_epi64(__A, __B),
-                                           (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srl_epi64(__mmask8 __U, __m512i __A, __m128i __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                           (__v8di)_mm512_srl_epi64(__A, __B),
-                                           (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srlv_epi32(__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_psrlv16si((__v16si)__X, (__v16si)__Y);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srlv_epi32(__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                           (__v16si)_mm512_srlv_epi32(__X, __Y),
-                                           (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srlv_epi32(__mmask16 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                           (__v16si)_mm512_srlv_epi32(__X, __Y),
-                                           (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srlv_epi64 (__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_psrlv8di((__v8di)__X, (__v8di)__Y);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srlv_epi64(__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                            (__v8di)_mm512_srlv_epi64(__X, __Y),
-                                            (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srlv_epi64(__mmask8 __U, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                            (__v8di)_mm512_srlv_epi64(__X, __Y),
-                                            (__v8di)_mm512_setzero_si512());
-}
-
-#define _mm512_ternarylogic_epi32(A, B, C, imm) \
-  (__m512i)__builtin_ia32_pternlogd512_mask((__v16si)(__m512i)(A), \
-                                            (__v16si)(__m512i)(B), \
-                                            (__v16si)(__m512i)(C), (int)(imm), \
-                                            (__mmask16)-1)
-
-#define _mm512_mask_ternarylogic_epi32(A, U, B, C, imm) \
-  (__m512i)__builtin_ia32_pternlogd512_mask((__v16si)(__m512i)(A), \
-                                            (__v16si)(__m512i)(B), \
-                                            (__v16si)(__m512i)(C), (int)(imm), \
-                                            (__mmask16)(U))
-
-#define _mm512_maskz_ternarylogic_epi32(U, A, B, C, imm) \
-  (__m512i)__builtin_ia32_pternlogd512_maskz((__v16si)(__m512i)(A), \
-                                             (__v16si)(__m512i)(B), \
-                                             (__v16si)(__m512i)(C), \
-                                             (int)(imm), (__mmask16)(U))
-
-#define _mm512_ternarylogic_epi64(A, B, C, imm) \
-  (__m512i)__builtin_ia32_pternlogq512_mask((__v8di)(__m512i)(A), \
-                                            (__v8di)(__m512i)(B), \
-                                            (__v8di)(__m512i)(C), (int)(imm), \
-                                            (__mmask8)-1)
-
-#define _mm512_mask_ternarylogic_epi64(A, U, B, C, imm) \
-  (__m512i)__builtin_ia32_pternlogq512_mask((__v8di)(__m512i)(A), \
-                                            (__v8di)(__m512i)(B), \
-                                            (__v8di)(__m512i)(C), (int)(imm), \
-                                            (__mmask8)(U))
-
-#define _mm512_maskz_ternarylogic_epi64(U, A, B, C, imm) \
-  (__m512i)__builtin_ia32_pternlogq512_maskz((__v8di)(__m512i)(A), \
-                                             (__v8di)(__m512i)(B), \
-                                             (__v8di)(__m512i)(C), (int)(imm), \
-                                             (__mmask8)(U))
-
-#ifdef __x86_64__
-#define _mm_cvt_roundsd_i64(A, R) \
-  (long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R))
-#endif
-
-#define _mm_cvt_roundsd_si32(A, R) \
-  (int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R))
-
-#define _mm_cvt_roundsd_i32(A, R) \
-  (int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R))
-
-#define _mm_cvt_roundsd_u32(A, R) \
-  (unsigned int)__builtin_ia32_vcvtsd2usi32((__v2df)(__m128d)(A), (int)(R))
-
-static __inline__ unsigned __DEFAULT_FN_ATTRS128
-_mm_cvtsd_u32 (__m128d __A)
-{
-  return (unsigned) __builtin_ia32_vcvtsd2usi32 ((__v2df) __A,
-             _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __x86_64__
-#define _mm_cvt_roundsd_u64(A, R) \
-  (unsigned long long)__builtin_ia32_vcvtsd2usi64((__v2df)(__m128d)(A), \
-                                                  (int)(R))
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
-_mm_cvtsd_u64 (__m128d __A)
-{
-  return (unsigned long long) __builtin_ia32_vcvtsd2usi64 ((__v2df)
-                 __A,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-#endif
-
-#define _mm_cvt_roundss_si32(A, R) \
-  (int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R))
-
-#define _mm_cvt_roundss_i32(A, R) \
-  (int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R))
-
-#ifdef __x86_64__
-#define _mm_cvt_roundss_si64(A, R) \
-  (long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R))
-
-#define _mm_cvt_roundss_i64(A, R) \
-  (long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R))
-#endif
-
-#define _mm_cvt_roundss_u32(A, R) \
-  (unsigned int)__builtin_ia32_vcvtss2usi32((__v4sf)(__m128)(A), (int)(R))
-
-static __inline__ unsigned __DEFAULT_FN_ATTRS128
-_mm_cvtss_u32 (__m128 __A)
-{
-  return (unsigned) __builtin_ia32_vcvtss2usi32 ((__v4sf) __A,
-             _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __x86_64__
-#define _mm_cvt_roundss_u64(A, R) \
-  (unsigned long long)__builtin_ia32_vcvtss2usi64((__v4sf)(__m128)(A), \
-                                                  (int)(R))
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
-_mm_cvtss_u64 (__m128 __A)
-{
-  return (unsigned long long) __builtin_ia32_vcvtss2usi64 ((__v4sf)
-                 __A,
-                 _MM_FROUND_CUR_DIRECTION);
-}
-#endif
-
-#define _mm_cvtt_roundsd_i32(A, R) \
-  (int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R))
-
-#define _mm_cvtt_roundsd_si32(A, R) \
-  (int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R))
-
-static __inline__ int __DEFAULT_FN_ATTRS128
-_mm_cvttsd_i32 (__m128d __A)
-{
-  return (int) __builtin_ia32_vcvttsd2si32 ((__v2df) __A,
-              _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __x86_64__
-#define _mm_cvtt_roundsd_si64(A, R) \
-  (long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R))
-
-#define _mm_cvtt_roundsd_i64(A, R) \
-  (long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R))
-
-static __inline__ long long __DEFAULT_FN_ATTRS128
-_mm_cvttsd_i64 (__m128d __A)
-{
-  return (long long) __builtin_ia32_vcvttsd2si64 ((__v2df) __A,
-              _MM_FROUND_CUR_DIRECTION);
-}
-#endif
-
-#define _mm_cvtt_roundsd_u32(A, R) \
-  (unsigned int)__builtin_ia32_vcvttsd2usi32((__v2df)(__m128d)(A), (int)(R))
-
-static __inline__ unsigned __DEFAULT_FN_ATTRS128
-_mm_cvttsd_u32 (__m128d __A)
-{
-  return (unsigned) __builtin_ia32_vcvttsd2usi32 ((__v2df) __A,
-              _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __x86_64__
-#define _mm_cvtt_roundsd_u64(A, R) \
-  (unsigned long long)__builtin_ia32_vcvttsd2usi64((__v2df)(__m128d)(A), \
-                                                   (int)(R))
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
-_mm_cvttsd_u64 (__m128d __A)
-{
-  return (unsigned long long) __builtin_ia32_vcvttsd2usi64 ((__v2df)
-                  __A,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-#endif
-
-#define _mm_cvtt_roundss_i32(A, R) \
-  (int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R))
-
-#define _mm_cvtt_roundss_si32(A, R) \
-  (int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R))
-
-static __inline__ int __DEFAULT_FN_ATTRS128
-_mm_cvttss_i32 (__m128 __A)
-{
-  return (int) __builtin_ia32_vcvttss2si32 ((__v4sf) __A,
-              _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __x86_64__
-#define _mm_cvtt_roundss_i64(A, R) \
-  (long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R))
-
-#define _mm_cvtt_roundss_si64(A, R) \
-  (long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R))
-
-static __inline__ long long __DEFAULT_FN_ATTRS128
-_mm_cvttss_i64 (__m128 __A)
-{
-  return (long long) __builtin_ia32_vcvttss2si64 ((__v4sf) __A,
-              _MM_FROUND_CUR_DIRECTION);
-}
-#endif
-
-#define _mm_cvtt_roundss_u32(A, R) \
-  (unsigned int)__builtin_ia32_vcvttss2usi32((__v4sf)(__m128)(A), (int)(R))
-
-static __inline__ unsigned __DEFAULT_FN_ATTRS128
-_mm_cvttss_u32 (__m128 __A)
-{
-  return (unsigned) __builtin_ia32_vcvttss2usi32 ((__v4sf) __A,
-              _MM_FROUND_CUR_DIRECTION);
-}
-
-#ifdef __x86_64__
-#define _mm_cvtt_roundss_u64(A, R) \
-  (unsigned long long)__builtin_ia32_vcvttss2usi64((__v4sf)(__m128)(A), \
-                                                   (int)(R))
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
-_mm_cvttss_u64 (__m128 __A)
-{
-  return (unsigned long long) __builtin_ia32_vcvttss2usi64 ((__v4sf)
-                  __A,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-#endif
-
-#define _mm512_permute_pd(X, C) \
-  (__m512d)__builtin_ia32_vpermilpd512((__v8df)(__m512d)(X), (int)(C))
-
-#define _mm512_mask_permute_pd(W, U, X, C) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                       (__v8df)_mm512_permute_pd((X), (C)), \
-                                       (__v8df)(__m512d)(W))
-
-#define _mm512_maskz_permute_pd(U, X, C) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                       (__v8df)_mm512_permute_pd((X), (C)), \
-                                       (__v8df)_mm512_setzero_pd())
-
-#define _mm512_permute_ps(X, C) \
-  (__m512)__builtin_ia32_vpermilps512((__v16sf)(__m512)(X), (int)(C))
-
-#define _mm512_mask_permute_ps(W, U, X, C) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                      (__v16sf)_mm512_permute_ps((X), (C)), \
-                                      (__v16sf)(__m512)(W))
-
-#define _mm512_maskz_permute_ps(U, X, C) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                      (__v16sf)_mm512_permute_ps((X), (C)), \
-                                      (__v16sf)_mm512_setzero_ps())
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_permutevar_pd(__m512d __A, __m512i __C)
-{
-  return (__m512d)__builtin_ia32_vpermilvarpd512((__v8df)__A, (__v8di)__C);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_permutevar_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512i __C)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                         (__v8df)_mm512_permutevar_pd(__A, __C),
-                                         (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutevar_pd(__mmask8 __U, __m512d __A, __m512i __C)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                         (__v8df)_mm512_permutevar_pd(__A, __C),
-                                         (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_permutevar_ps(__m512 __A, __m512i __C)
-{
-  return (__m512)__builtin_ia32_vpermilvarps512((__v16sf)__A, (__v16si)__C);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_permutevar_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512i __C)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                        (__v16sf)_mm512_permutevar_ps(__A, __C),
-                                        (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutevar_ps(__mmask16 __U, __m512 __A, __m512i __C)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                        (__v16sf)_mm512_permutevar_ps(__A, __C),
-                                        (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline __m512d __DEFAULT_FN_ATTRS512
-_mm512_permutex2var_pd(__m512d __A, __m512i __I, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_vpermi2varpd512((__v8df)__A, (__v8di)__I,
-                                                 (__v8df)__B);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_permutex2var_pd(__m512d __A, __mmask8 __U, __m512i __I, __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                  (__v8df)_mm512_permutex2var_pd(__A, __I, __B),
-                                  (__v8df)__A);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask2_permutex2var_pd(__m512d __A, __m512i __I, __mmask8 __U,
-                             __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                  (__v8df)_mm512_permutex2var_pd(__A, __I, __B),
-                                  (__v8df)(__m512d)__I);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutex2var_pd(__mmask8 __U, __m512d __A, __m512i __I,
-                             __m512d __B)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__U,
-                                  (__v8df)_mm512_permutex2var_pd(__A, __I, __B),
-                                  (__v8df)_mm512_setzero_pd());
-}
-
-static __inline __m512 __DEFAULT_FN_ATTRS512
-_mm512_permutex2var_ps(__m512 __A, __m512i __I, __m512 __B)
-{
-  return (__m512)__builtin_ia32_vpermi2varps512((__v16sf)__A, (__v16si)__I,
-                                                (__v16sf) __B);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_permutex2var_ps(__m512 __A, __mmask16 __U, __m512i __I, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                 (__v16sf)_mm512_permutex2var_ps(__A, __I, __B),
-                                 (__v16sf)__A);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask2_permutex2var_ps(__m512 __A, __m512i __I, __mmask16 __U, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                 (__v16sf)_mm512_permutex2var_ps(__A, __I, __B),
-                                 (__v16sf)(__m512)__I);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutex2var_ps(__mmask16 __U, __m512 __A, __m512i __I, __m512 __B)
-{
-  return (__m512)__builtin_ia32_selectps_512(__U,
-                                 (__v16sf)_mm512_permutex2var_ps(__A, __I, __B),
-                                 (__v16sf)_mm512_setzero_ps());
-}
-
-
-#define _mm512_cvtt_roundpd_epu32(A, R) \
-  (__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
-                                             (__v8si)_mm256_undefined_si256(), \
-                                             (__mmask8)-1, (int)(R))
-
-#define _mm512_mask_cvtt_roundpd_epu32(W, U, A, R) \
-  (__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
-                                             (__v8si)(__m256i)(W), \
-                                             (__mmask8)(U), (int)(R))
-
-#define _mm512_maskz_cvtt_roundpd_epu32(U, A, R) \
-  (__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
-                                             (__v8si)_mm256_setzero_si256(), \
-                                             (__mmask8)(U), (int)(R))
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvttpd_epu32 (__m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A,
-                  (__v8si)
-                  _mm256_undefined_si256 (),
-                  (__mmask8) -1,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvttpd_epu32 (__m256i __W, __mmask8 __U, __m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A,
-                  (__v8si) __W,
-                  (__mmask8) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvttpd_epu32 (__mmask8 __U, __m512d __A)
-{
-  return (__m256i) __builtin_ia32_cvttpd2udq512_mask ((__v8df) __A,
-                  (__v8si)
-                  _mm256_setzero_si256 (),
-                  (__mmask8) __U,
-                  _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_roundscale_round_sd(A, B, imm, R) \
-  (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
-                                                (__v2df)(__m128d)(B), \
-                                                (__v2df)_mm_setzero_pd(), \
-                                                (__mmask8)-1, (int)(imm), \
-                                                (int)(R))
-
-#define _mm_roundscale_sd(A, B, imm) \
-  (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
-                                                (__v2df)(__m128d)(B), \
-                                                (__v2df)_mm_setzero_pd(), \
-                                                (__mmask8)-1, (int)(imm), \
-                                                _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_roundscale_sd(W, U, A, B, imm) \
-  (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
-                                                (__v2df)(__m128d)(B), \
-                                                (__v2df)(__m128d)(W), \
-                                                (__mmask8)(U), (int)(imm), \
-                                                _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_roundscale_round_sd(W, U, A, B, I, R) \
-  (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
-                                                (__v2df)(__m128d)(B), \
-                                                (__v2df)(__m128d)(W), \
-                                                (__mmask8)(U), (int)(I), \
-                                                (int)(R))
-
-#define _mm_maskz_roundscale_sd(U, A, B, I) \
-  (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
-                                                (__v2df)(__m128d)(B), \
-                                                (__v2df)_mm_setzero_pd(), \
-                                                (__mmask8)(U), (int)(I), \
-                                                _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_maskz_roundscale_round_sd(U, A, B, I, R) \
-  (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
-                                                (__v2df)(__m128d)(B), \
-                                                (__v2df)_mm_setzero_pd(), \
-                                                (__mmask8)(U), (int)(I), \
-                                                (int)(R))
-
-#define _mm_roundscale_round_ss(A, B, imm, R) \
-  (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (__v4sf)_mm_setzero_ps(), \
-                                               (__mmask8)-1, (int)(imm), \
-                                               (int)(R))
-
-#define _mm_roundscale_ss(A, B, imm) \
-  (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (__v4sf)_mm_setzero_ps(), \
-                                               (__mmask8)-1, (int)(imm), \
-                                               _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_roundscale_ss(W, U, A, B, I) \
-  (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (__v4sf)(__m128)(W), \
-                                               (__mmask8)(U), (int)(I), \
-                                               _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_roundscale_round_ss(W, U, A, B, I, R) \
-  (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (__v4sf)(__m128)(W), \
-                                               (__mmask8)(U), (int)(I), \
-                                               (int)(R))
-
-#define _mm_maskz_roundscale_ss(U, A, B, I) \
-  (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (__v4sf)_mm_setzero_ps(), \
-                                               (__mmask8)(U), (int)(I), \
-                                               _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_maskz_roundscale_round_ss(U, A, B, I, R) \
-  (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
-                                               (__v4sf)(__m128)(B), \
-                                               (__v4sf)_mm_setzero_ps(), \
-                                               (__mmask8)(U), (int)(I), \
-                                               (int)(R))
-
-#define _mm512_scalef_round_pd(A, B, R) \
-  (__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
-                                           (__v8df)(__m512d)(B), \
-                                           (__v8df)_mm512_undefined_pd(), \
-                                           (__mmask8)-1, (int)(R))
-
-#define _mm512_mask_scalef_round_pd(W, U, A, B, R) \
-  (__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
-                                           (__v8df)(__m512d)(B), \
-                                           (__v8df)(__m512d)(W), \
-                                           (__mmask8)(U), (int)(R))
-
-#define _mm512_maskz_scalef_round_pd(U, A, B, R) \
-  (__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
-                                           (__v8df)(__m512d)(B), \
-                                           (__v8df)_mm512_setzero_pd(), \
-                                           (__mmask8)(U), (int)(R))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_scalef_pd (__m512d __A, __m512d __B)
-{
-  return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A,
-                (__v8df) __B,
-                (__v8df)
-                _mm512_undefined_pd (),
-                (__mmask8) -1,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_scalef_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A,
-                (__v8df) __B,
-                (__v8df) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_scalef_pd (__mmask8 __U, __m512d __A, __m512d __B)
-{
-  return (__m512d) __builtin_ia32_scalefpd512_mask ((__v8df) __A,
-                (__v8df) __B,
-                (__v8df)
-                _mm512_setzero_pd (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_scalef_round_ps(A, B, R) \
-  (__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)(__m512)(B), \
-                                          (__v16sf)_mm512_undefined_ps(), \
-                                          (__mmask16)-1, (int)(R))
-
-#define _mm512_mask_scalef_round_ps(W, U, A, B, R) \
-  (__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)(__m512)(B), \
-                                          (__v16sf)(__m512)(W), \
-                                          (__mmask16)(U), (int)(R))
-
-#define _mm512_maskz_scalef_round_ps(U, A, B, R) \
-  (__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)(__m512)(B), \
-                                          (__v16sf)_mm512_setzero_ps(), \
-                                          (__mmask16)(U), (int)(R))
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_scalef_ps (__m512 __A, __m512 __B)
-{
-  return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A,
-               (__v16sf) __B,
-               (__v16sf)
-               _mm512_undefined_ps (),
-               (__mmask16) -1,
-               _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_scalef_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A,
-               (__v16sf) __B,
-               (__v16sf) __W,
-               (__mmask16) __U,
-               _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_scalef_ps (__mmask16 __U, __m512 __A, __m512 __B)
-{
-  return (__m512) __builtin_ia32_scalefps512_mask ((__v16sf) __A,
-               (__v16sf) __B,
-               (__v16sf)
-               _mm512_setzero_ps (),
-               (__mmask16) __U,
-               _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_scalef_round_sd(A, B, R) \
-  (__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
-                                              (__v2df)(__m128d)(B), \
-                                              (__v2df)_mm_setzero_pd(), \
-                                              (__mmask8)-1, (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_scalef_sd (__m128d __A, __m128d __B)
-{
-  return (__m128d) __builtin_ia32_scalefsd_round_mask ((__v2df) __A,
-              (__v2df)( __B), (__v2df) _mm_setzero_pd(),
-              (__mmask8) -1,
-              _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_scalef_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_scalefsd_round_mask ( (__v2df) __A,
-                 (__v2df) __B,
-                (__v2df) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask_scalef_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
-                                              (__v2df)(__m128d)(B), \
-                                              (__v2df)(__m128d)(W), \
-                                              (__mmask8)(U), (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_scalef_sd (__mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_scalefsd_round_mask ( (__v2df) __A,
-                 (__v2df) __B,
-                (__v2df) _mm_setzero_pd (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_scalef_round_sd(U, A, B, R) \
-  (__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
-                                              (__v2df)(__m128d)(B), \
-                                              (__v2df)_mm_setzero_pd(), \
-                                              (__mmask8)(U), (int)(R))
-
-#define _mm_scalef_round_ss(A, B, R) \
-  (__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
-                                             (__v4sf)(__m128)(B), \
-                                             (__v4sf)_mm_setzero_ps(), \
-                                             (__mmask8)-1, (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_scalef_ss (__m128 __A, __m128 __B)
-{
-  return (__m128) __builtin_ia32_scalefss_round_mask ((__v4sf) __A,
-             (__v4sf)( __B), (__v4sf) _mm_setzero_ps(),
-             (__mmask8) -1,
-             _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_scalef_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_scalefss_round_mask ( (__v4sf) __A,
-                (__v4sf) __B,
-                (__v4sf) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask_scalef_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
-                                             (__v4sf)(__m128)(B), \
-                                             (__v4sf)(__m128)(W), \
-                                             (__mmask8)(U), (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_scalef_ss (__mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_scalefss_round_mask ( (__v4sf) __A,
-                 (__v4sf) __B,
-                (__v4sf) _mm_setzero_ps (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_scalef_round_ss(U, A, B, R) \
-  (__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
-                                             (__v4sf)(__m128)(B), \
-                                             (__v4sf)_mm_setzero_ps(), \
-                                             (__mmask8)(U), \
-                                             (int)(R))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srai_epi32(__m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_psradi512((__v16si)__A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srai_epi32(__m512i __W, __mmask16 __U, __m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                         (__v16si)_mm512_srai_epi32(__A, __B),
-                                         (__v16si)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srai_epi32(__mmask16 __U, __m512i __A, int __B) {
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
-                                         (__v16si)_mm512_srai_epi32(__A, __B),
-                                         (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_srai_epi64(__m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_psraqi512((__v8di)__A, __B);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_srai_epi64(__m512i __W, __mmask8 __U, __m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                          (__v8di)_mm512_srai_epi64(__A, __B),
-                                          (__v8di)__W);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_srai_epi64(__mmask8 __U, __m512i __A, int __B)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
-                                          (__v8di)_mm512_srai_epi64(__A, __B),
-                                          (__v8di)_mm512_setzero_si512());
-}
-
-#define _mm512_shuffle_f32x4(A, B, imm) \
-  (__m512)__builtin_ia32_shuf_f32x4((__v16sf)(__m512)(A), \
-                                    (__v16sf)(__m512)(B), (int)(imm))
-
-#define _mm512_mask_shuffle_f32x4(W, U, A, B, imm) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                      (__v16sf)_mm512_shuffle_f32x4((A), (B), (imm)), \
-                                      (__v16sf)(__m512)(W))
-
-#define _mm512_maskz_shuffle_f32x4(U, A, B, imm) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                      (__v16sf)_mm512_shuffle_f32x4((A), (B), (imm)), \
-                                      (__v16sf)_mm512_setzero_ps())
-
-#define _mm512_shuffle_f64x2(A, B, imm) \
-  (__m512d)__builtin_ia32_shuf_f64x2((__v8df)(__m512d)(A), \
-                                     (__v8df)(__m512d)(B), (int)(imm))
-
-#define _mm512_mask_shuffle_f64x2(W, U, A, B, imm) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                       (__v8df)_mm512_shuffle_f64x2((A), (B), (imm)), \
-                                       (__v8df)(__m512d)(W))
-
-#define _mm512_maskz_shuffle_f64x2(U, A, B, imm) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                       (__v8df)_mm512_shuffle_f64x2((A), (B), (imm)), \
-                                       (__v8df)_mm512_setzero_pd())
-
-#define _mm512_shuffle_i32x4(A, B, imm) \
-  (__m512i)__builtin_ia32_shuf_i32x4((__v16si)(__m512i)(A), \
-                                     (__v16si)(__m512i)(B), (int)(imm))
-
-#define _mm512_mask_shuffle_i32x4(W, U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                      (__v16si)_mm512_shuffle_i32x4((A), (B), (imm)), \
-                                      (__v16si)(__m512i)(W))
-
-#define _mm512_maskz_shuffle_i32x4(U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                      (__v16si)_mm512_shuffle_i32x4((A), (B), (imm)), \
-                                      (__v16si)_mm512_setzero_si512())
-
-#define _mm512_shuffle_i64x2(A, B, imm) \
-  (__m512i)__builtin_ia32_shuf_i64x2((__v8di)(__m512i)(A), \
-                                     (__v8di)(__m512i)(B), (int)(imm))
-
-#define _mm512_mask_shuffle_i64x2(W, U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                      (__v8di)_mm512_shuffle_i64x2((A), (B), (imm)), \
-                                      (__v8di)(__m512i)(W))
-
-#define _mm512_maskz_shuffle_i64x2(U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                      (__v8di)_mm512_shuffle_i64x2((A), (B), (imm)), \
-                                      (__v8di)_mm512_setzero_si512())
-
-#define _mm512_shuffle_pd(A, B, M) \
-  (__m512d)__builtin_ia32_shufpd512((__v8df)(__m512d)(A), \
-                                    (__v8df)(__m512d)(B), (int)(M))
-
-#define _mm512_mask_shuffle_pd(W, U, A, B, M) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                       (__v8df)_mm512_shuffle_pd((A), (B), (M)), \
-                                       (__v8df)(__m512d)(W))
-
-#define _mm512_maskz_shuffle_pd(U, A, B, M) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                       (__v8df)_mm512_shuffle_pd((A), (B), (M)), \
-                                       (__v8df)_mm512_setzero_pd())
-
-#define _mm512_shuffle_ps(A, B, M) \
-  (__m512)__builtin_ia32_shufps512((__v16sf)(__m512)(A), \
-                                   (__v16sf)(__m512)(B), (int)(M))
-
-#define _mm512_mask_shuffle_ps(W, U, A, B, M) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                      (__v16sf)_mm512_shuffle_ps((A), (B), (M)), \
-                                      (__v16sf)(__m512)(W))
-
-#define _mm512_maskz_shuffle_ps(U, A, B, M) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                      (__v16sf)_mm512_shuffle_ps((A), (B), (M)), \
-                                      (__v16sf)_mm512_setzero_ps())
-
-#define _mm_sqrt_round_sd(A, B, R) \
-  (__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)_mm_setzero_pd(), \
-                                            (__mmask8)-1, (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_sqrt_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_sqrtsd_round_mask ( (__v2df) __A,
-                 (__v2df) __B,
-                (__v2df) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask_sqrt_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)(__m128d)(W), \
-                                            (__mmask8)(U), (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_sqrt_sd (__mmask8 __U, __m128d __A, __m128d __B)
-{
- return (__m128d) __builtin_ia32_sqrtsd_round_mask ( (__v2df) __A,
-                 (__v2df) __B,
-                (__v2df) _mm_setzero_pd (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_sqrt_round_sd(U, A, B, R) \
-  (__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
-                                            (__v2df)(__m128d)(B), \
-                                            (__v2df)_mm_setzero_pd(), \
-                                            (__mmask8)(U), (int)(R))
-
-#define _mm_sqrt_round_ss(A, B, R) \
-  (__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)_mm_setzero_ps(), \
-                                           (__mmask8)-1, (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_sqrt_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_sqrtss_round_mask ( (__v4sf) __A,
-                 (__v4sf) __B,
-                (__v4sf) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask_sqrt_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)(__m128)(W), (__mmask8)(U), \
-                                           (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_sqrt_ss (__mmask8 __U, __m128 __A, __m128 __B)
-{
- return (__m128) __builtin_ia32_sqrtss_round_mask ( (__v4sf) __A,
-                 (__v4sf) __B,
-                (__v4sf) _mm_setzero_ps (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_sqrt_round_ss(U, A, B, R) \
-  (__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
-                                           (__v4sf)(__m128)(B), \
-                                           (__v4sf)_mm_setzero_ps(), \
-                                           (__mmask8)(U), (int)(R))
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_broadcast_f32x4(__m128 __A)
-{
-  return (__m512)__builtin_shufflevector((__v4sf)__A, (__v4sf)__A,
-                                         0, 1, 2, 3, 0, 1, 2, 3,
-                                         0, 1, 2, 3, 0, 1, 2, 3);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcast_f32x4(__m512 __O, __mmask16 __M, __m128 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__M,
-                                           (__v16sf)_mm512_broadcast_f32x4(__A),
-                                           (__v16sf)__O);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcast_f32x4(__mmask16 __M, __m128 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__M,
-                                           (__v16sf)_mm512_broadcast_f32x4(__A),
-                                           (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_broadcast_f64x4(__m256d __A)
-{
-  return (__m512d)__builtin_shufflevector((__v4df)__A, (__v4df)__A,
-                                          0, 1, 2, 3, 0, 1, 2, 3);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcast_f64x4(__m512d __O, __mmask8 __M, __m256d __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__M,
-                                            (__v8df)_mm512_broadcast_f64x4(__A),
-                                            (__v8df)__O);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcast_f64x4(__mmask8 __M, __m256d __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__M,
-                                            (__v8df)_mm512_broadcast_f64x4(__A),
-                                            (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_broadcast_i32x4(__m128i __A)
-{
-  return (__m512i)__builtin_shufflevector((__v4si)__A, (__v4si)__A,
-                                          0, 1, 2, 3, 0, 1, 2, 3,
-                                          0, 1, 2, 3, 0, 1, 2, 3);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcast_i32x4(__m512i __O, __mmask16 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                           (__v16si)_mm512_broadcast_i32x4(__A),
-                                           (__v16si)__O);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcast_i32x4(__mmask16 __M, __m128i __A)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                           (__v16si)_mm512_broadcast_i32x4(__A),
-                                           (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_broadcast_i64x4(__m256i __A)
-{
-  return (__m512i)__builtin_shufflevector((__v4di)__A, (__v4di)__A,
-                                          0, 1, 2, 3, 0, 1, 2, 3);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcast_i64x4(__m512i __O, __mmask8 __M, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                            (__v8di)_mm512_broadcast_i64x4(__A),
-                                            (__v8di)__O);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcast_i64x4(__mmask8 __M, __m256i __A)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                            (__v8di)_mm512_broadcast_i64x4(__A),
-                                            (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcastsd_pd (__m512d __O, __mmask8 __M, __m128d __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__M,
-                                              (__v8df) _mm512_broadcastsd_pd(__A),
-                                              (__v8df) __O);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcastsd_pd (__mmask8 __M, __m128d __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512(__M,
-                                              (__v8df) _mm512_broadcastsd_pd(__A),
-                                              (__v8df) _mm512_setzero_pd());
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_broadcastss_ps (__m512 __O, __mmask16 __M, __m128 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512(__M,
-                                             (__v16sf) _mm512_broadcastss_ps(__A),
-                                             (__v16sf) __O);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_broadcastss_ps (__mmask16 __M, __m128 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512(__M,
-                                             (__v16sf) _mm512_broadcastss_ps(__A),
-                                             (__v16sf) _mm512_setzero_ps());
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtsepi32_epi8 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A,
-               (__v16qi) _mm_undefined_si128 (),
-               (__mmask16) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi32_epi8 (__m128i __O, __mmask16 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A,
-               (__v16qi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtsepi32_epi8 (__mmask16 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsdb512_mask ((__v16si) __A,
-               (__v16qi) _mm_setzero_si128 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi32_storeu_epi8 (void * __P, __mmask16 __M, __m512i __A)
-{
-  __builtin_ia32_pmovsdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtsepi32_epi16 (__m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A,
-               (__v16hi) _mm256_undefined_si256 (),
-               (__mmask16) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi32_epi16 (__m256i __O, __mmask16 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A,
-               (__v16hi) __O, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtsepi32_epi16 (__mmask16 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovsdw512_mask ((__v16si) __A,
-               (__v16hi) _mm256_setzero_si256 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi32_storeu_epi16 (void *__P, __mmask16 __M, __m512i __A)
-{
-  __builtin_ia32_pmovsdw512mem_mask ((__v16hi*) __P, (__v16si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtsepi64_epi8 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A,
-               (__v16qi) _mm_undefined_si128 (),
-               (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi64_epi8 (__m128i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A,
-               (__v16qi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtsepi64_epi8 (__mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqb512_mask ((__v8di) __A,
-               (__v16qi) _mm_setzero_si128 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi64_storeu_epi8 (void * __P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovsqb512mem_mask ((__v16qi *) __P, (__v8di) __A, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtsepi64_epi32 (__m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A,
-               (__v8si) _mm256_undefined_si256 (),
-               (__mmask8) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi64_epi32 (__m256i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A,
-               (__v8si) __O, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtsepi64_epi32 (__mmask8 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovsqd512_mask ((__v8di) __A,
-               (__v8si) _mm256_setzero_si256 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi64_storeu_epi32 (void *__P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovsqd512mem_mask ((__v8si *) __P, (__v8di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtsepi64_epi16 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A,
-               (__v8hi) _mm_undefined_si128 (),
-               (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi64_epi16 (__m128i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A,
-               (__v8hi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtsepi64_epi16 (__mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovsqw512_mask ((__v8di) __A,
-               (__v8hi) _mm_setzero_si128 (),
-               __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtsepi64_storeu_epi16 (void * __P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovsqw512mem_mask ((__v8hi *) __P, (__v8di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtusepi32_epi8 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A,
-                (__v16qi) _mm_undefined_si128 (),
-                (__mmask16) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi32_epi8 (__m128i __O, __mmask16 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A,
-                (__v16qi) __O,
-                __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtusepi32_epi8 (__mmask16 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusdb512_mask ((__v16si) __A,
-                (__v16qi) _mm_setzero_si128 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi32_storeu_epi8 (void * __P, __mmask16 __M, __m512i __A)
-{
-  __builtin_ia32_pmovusdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtusepi32_epi16 (__m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovusdw512_mask ((__v16si) __A,
-                (__v16hi) _mm256_undefined_si256 (),
-                (__mmask16) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi32_epi16 (__m256i __O, __mmask16 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovusdw512_mask ((__v16si) __A,
-                (__v16hi) __O,
-                __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtusepi32_epi16 (__mmask16 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovusdw512_mask ((__v16si) __A,
-                (__v16hi) _mm256_setzero_si256 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi32_storeu_epi16 (void *__P, __mmask16 __M, __m512i __A)
-{
-  __builtin_ia32_pmovusdw512mem_mask ((__v16hi*) __P, (__v16si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtusepi64_epi8 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqb512_mask ((__v8di) __A,
-                (__v16qi) _mm_undefined_si128 (),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi64_epi8 (__m128i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqb512_mask ((__v8di) __A,
-                (__v16qi) __O,
-                __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtusepi64_epi8 (__mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqb512_mask ((__v8di) __A,
-                (__v16qi) _mm_setzero_si128 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi64_storeu_epi8 (void * __P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovusqb512mem_mask ((__v16qi *) __P, (__v8di) __A, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtusepi64_epi32 (__m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A,
-                (__v8si) _mm256_undefined_si256 (),
-                (__mmask8) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi64_epi32 (__m256i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A,
-                (__v8si) __O, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtusepi64_epi32 (__mmask8 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovusqd512_mask ((__v8di) __A,
-                (__v8si) _mm256_setzero_si256 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi64_storeu_epi32 (void* __P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovusqd512mem_mask ((__v8si*) __P, (__v8di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtusepi64_epi16 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqw512_mask ((__v8di) __A,
-                (__v8hi) _mm_undefined_si128 (),
-                (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi64_epi16 (__m128i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqw512_mask ((__v8di) __A,
-                (__v8hi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtusepi64_epi16 (__mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovusqw512_mask ((__v8di) __A,
-                (__v8hi) _mm_setzero_si128 (),
-                __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtusepi64_storeu_epi16 (void *__P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovusqw512mem_mask ((__v8hi*) __P, (__v8di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi32_epi8 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A,
-              (__v16qi) _mm_undefined_si128 (),
-              (__mmask16) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32_epi8 (__m128i __O, __mmask16 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A,
-              (__v16qi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi32_epi8 (__mmask16 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovdb512_mask ((__v16si) __A,
-              (__v16qi) _mm_setzero_si128 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32_storeu_epi8 (void * __P, __mmask16 __M, __m512i __A)
-{
-  __builtin_ia32_pmovdb512mem_mask ((__v16qi *) __P, (__v16si) __A, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi32_epi16 (__m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A,
-              (__v16hi) _mm256_undefined_si256 (),
-              (__mmask16) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32_epi16 (__m256i __O, __mmask16 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A,
-              (__v16hi) __O, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi32_epi16 (__mmask16 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovdw512_mask ((__v16si) __A,
-              (__v16hi) _mm256_setzero_si256 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi32_storeu_epi16 (void * __P, __mmask16 __M, __m512i __A)
-{
-  __builtin_ia32_pmovdw512mem_mask ((__v16hi *) __P, (__v16si) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi64_epi8 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A,
-              (__v16qi) _mm_undefined_si128 (),
-              (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi64_epi8 (__m128i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A,
-              (__v16qi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi64_epi8 (__mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqb512_mask ((__v8di) __A,
-              (__v16qi) _mm_setzero_si128 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi64_storeu_epi8 (void * __P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovqb512mem_mask ((__v16qi *) __P, (__v8di) __A, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi64_epi32 (__m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A,
-              (__v8si) _mm256_undefined_si256 (),
-              (__mmask8) -1);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi64_epi32 (__m256i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A,
-              (__v8si) __O, __M);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi64_epi32 (__mmask8 __M, __m512i __A)
-{
-  return (__m256i) __builtin_ia32_pmovqd512_mask ((__v8di) __A,
-              (__v8si) _mm256_setzero_si256 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi64_storeu_epi32 (void* __P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovqd512mem_mask ((__v8si *) __P, (__v8di) __A, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_cvtepi64_epi16 (__m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A,
-              (__v8hi) _mm_undefined_si128 (),
-              (__mmask8) -1);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi64_epi16 (__m128i __O, __mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A,
-              (__v8hi) __O, __M);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtepi64_epi16 (__mmask8 __M, __m512i __A)
-{
-  return (__m128i) __builtin_ia32_pmovqw512_mask ((__v8di) __A,
-              (__v8hi) _mm_setzero_si128 (),
-              __M);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtepi64_storeu_epi16 (void *__P, __mmask8 __M, __m512i __A)
-{
-  __builtin_ia32_pmovqw512mem_mask ((__v8hi *) __P, (__v8di) __A, __M);
-}
-
-#define _mm512_extracti32x4_epi32(A, imm) \
-  (__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
-                                            (__v4si)_mm_undefined_si128(), \
-                                            (__mmask8)-1)
-
-#define _mm512_mask_extracti32x4_epi32(W, U, A, imm) \
-  (__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
-                                            (__v4si)(__m128i)(W), \
-                                            (__mmask8)(U))
-
-#define _mm512_maskz_extracti32x4_epi32(U, A, imm) \
-  (__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
-                                            (__v4si)_mm_setzero_si128(), \
-                                            (__mmask8)(U))
-
-#define _mm512_extracti64x4_epi64(A, imm) \
-  (__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
-                                            (__v4di)_mm256_undefined_si256(), \
-                                            (__mmask8)-1)
-
-#define _mm512_mask_extracti64x4_epi64(W, U, A, imm) \
-  (__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
-                                            (__v4di)(__m256i)(W), \
-                                            (__mmask8)(U))
-
-#define _mm512_maskz_extracti64x4_epi64(U, A, imm) \
-  (__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
-                                            (__v4di)_mm256_setzero_si256(), \
-                                            (__mmask8)(U))
-
-#define _mm512_insertf64x4(A, B, imm) \
-  (__m512d)__builtin_ia32_insertf64x4((__v8df)(__m512d)(A), \
-                                      (__v4df)(__m256d)(B), (int)(imm))
-
-#define _mm512_mask_insertf64x4(W, U, A, B, imm) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                  (__v8df)_mm512_insertf64x4((A), (B), (imm)), \
-                                  (__v8df)(__m512d)(W))
-
-#define _mm512_maskz_insertf64x4(U, A, B, imm) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                  (__v8df)_mm512_insertf64x4((A), (B), (imm)), \
-                                  (__v8df)_mm512_setzero_pd())
-
-#define _mm512_inserti64x4(A, B, imm) \
-  (__m512i)__builtin_ia32_inserti64x4((__v8di)(__m512i)(A), \
-                                      (__v4di)(__m256i)(B), (int)(imm))
-
-#define _mm512_mask_inserti64x4(W, U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                  (__v8di)_mm512_inserti64x4((A), (B), (imm)), \
-                                  (__v8di)(__m512i)(W))
-
-#define _mm512_maskz_inserti64x4(U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                  (__v8di)_mm512_inserti64x4((A), (B), (imm)), \
-                                  (__v8di)_mm512_setzero_si512())
-
-#define _mm512_insertf32x4(A, B, imm) \
-  (__m512)__builtin_ia32_insertf32x4((__v16sf)(__m512)(A), \
-                                     (__v4sf)(__m128)(B), (int)(imm))
-
-#define _mm512_mask_insertf32x4(W, U, A, B, imm) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                 (__v16sf)_mm512_insertf32x4((A), (B), (imm)), \
-                                 (__v16sf)(__m512)(W))
-
-#define _mm512_maskz_insertf32x4(U, A, B, imm) \
-  (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
-                                 (__v16sf)_mm512_insertf32x4((A), (B), (imm)), \
-                                 (__v16sf)_mm512_setzero_ps())
-
-#define _mm512_inserti32x4(A, B, imm) \
-  (__m512i)__builtin_ia32_inserti32x4((__v16si)(__m512i)(A), \
-                                      (__v4si)(__m128i)(B), (int)(imm))
-
-#define _mm512_mask_inserti32x4(W, U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                 (__v16si)_mm512_inserti32x4((A), (B), (imm)), \
-                                 (__v16si)(__m512i)(W))
-
-#define _mm512_maskz_inserti32x4(U, A, B, imm) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                 (__v16si)_mm512_inserti32x4((A), (B), (imm)), \
-                                 (__v16si)_mm512_setzero_si512())
-
-#define _mm512_getmant_round_pd(A, B, C, R) \
-  (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v8df)_mm512_undefined_pd(), \
-                                            (__mmask8)-1, (int)(R))
-
-#define _mm512_mask_getmant_round_pd(W, U, A, B, C, R) \
-  (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v8df)(__m512d)(W), \
-                                            (__mmask8)(U), (int)(R))
-
-#define _mm512_maskz_getmant_round_pd(U, A, B, C, R) \
-  (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v8df)_mm512_setzero_pd(), \
-                                            (__mmask8)(U), (int)(R))
-
-#define _mm512_getmant_pd(A, B, C) \
-  (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v8df)_mm512_setzero_pd(), \
-                                            (__mmask8)-1, \
-                                            _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_mask_getmant_pd(W, U, A, B, C) \
-  (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v8df)(__m512d)(W), \
-                                            (__mmask8)(U), \
-                                            _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_maskz_getmant_pd(U, A, B, C) \
-  (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
-                                            (int)(((C)<<2) | (B)), \
-                                            (__v8df)_mm512_setzero_pd(), \
-                                            (__mmask8)(U), \
-                                            _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_getmant_round_ps(A, B, C, R) \
-  (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
-                                           (int)(((C)<<2) | (B)), \
-                                           (__v16sf)_mm512_undefined_ps(), \
-                                           (__mmask16)-1, (int)(R))
-
-#define _mm512_mask_getmant_round_ps(W, U, A, B, C, R) \
-  (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
-                                           (int)(((C)<<2) | (B)), \
-                                           (__v16sf)(__m512)(W), \
-                                           (__mmask16)(U), (int)(R))
-
-#define _mm512_maskz_getmant_round_ps(U, A, B, C, R) \
-  (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
-                                           (int)(((C)<<2) | (B)), \
-                                           (__v16sf)_mm512_setzero_ps(), \
-                                           (__mmask16)(U), (int)(R))
-
-#define _mm512_getmant_ps(A, B, C) \
-  (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
-                                           (int)(((C)<<2)|(B)), \
-                                           (__v16sf)_mm512_undefined_ps(), \
-                                           (__mmask16)-1, \
-                                           _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_mask_getmant_ps(W, U, A, B, C) \
-  (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
-                                           (int)(((C)<<2)|(B)), \
-                                           (__v16sf)(__m512)(W), \
-                                           (__mmask16)(U), \
-                                           _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_maskz_getmant_ps(U, A, B, C) \
-  (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
-                                           (int)(((C)<<2)|(B)), \
-                                           (__v16sf)_mm512_setzero_ps(), \
-                                           (__mmask16)(U), \
-                                           _MM_FROUND_CUR_DIRECTION)
-
-#define _mm512_getexp_round_pd(A, R) \
-  (__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
-                                           (__v8df)_mm512_undefined_pd(), \
-                                           (__mmask8)-1, (int)(R))
-
-#define _mm512_mask_getexp_round_pd(W, U, A, R) \
-  (__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
-                                           (__v8df)(__m512d)(W), \
-                                           (__mmask8)(U), (int)(R))
-
-#define _mm512_maskz_getexp_round_pd(U, A, R) \
-  (__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
-                                           (__v8df)_mm512_setzero_pd(), \
-                                           (__mmask8)(U), (int)(R))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_getexp_pd (__m512d __A)
-{
-  return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A,
-                (__v8df) _mm512_undefined_pd (),
-                (__mmask8) -1,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_getexp_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A,
-                (__v8df) __W,
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_getexp_pd (__mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_getexppd512_mask ((__v8df) __A,
-                (__v8df) _mm512_setzero_pd (),
-                (__mmask8) __U,
-                _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_getexp_round_ps(A, R) \
-  (__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)_mm512_undefined_ps(), \
-                                          (__mmask16)-1, (int)(R))
-
-#define _mm512_mask_getexp_round_ps(W, U, A, R) \
-  (__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)(__m512)(W), \
-                                          (__mmask16)(U), (int)(R))
-
-#define _mm512_maskz_getexp_round_ps(U, A, R) \
-  (__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
-                                          (__v16sf)_mm512_setzero_ps(), \
-                                          (__mmask16)(U), (int)(R))
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_getexp_ps (__m512 __A)
-{
-  return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A,
-               (__v16sf) _mm512_undefined_ps (),
-               (__mmask16) -1,
-               _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_getexp_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A,
-               (__v16sf) __W,
-               (__mmask16) __U,
-               _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_getexp_ps (__mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_getexpps512_mask ((__v16sf) __A,
-               (__v16sf) _mm512_setzero_ps (),
-               (__mmask16) __U,
-               _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm512_i64gather_ps(index, addr, scale) \
-  (__m256)__builtin_ia32_gatherdiv16sf((__v8sf)_mm256_undefined_ps(), \
-                                       (void const *)(addr), \
-                                       (__v8di)(__m512i)(index), (__mmask8)-1, \
-                                       (int)(scale))
-
-#define _mm512_mask_i64gather_ps(v1_old, mask, index, addr, scale) \
-  (__m256)__builtin_ia32_gatherdiv16sf((__v8sf)(__m256)(v1_old),\
-                                       (void const *)(addr), \
-                                       (__v8di)(__m512i)(index), \
-                                       (__mmask8)(mask), (int)(scale))
-
-#define _mm512_i64gather_epi32(index, addr, scale) \
-  (__m256i)__builtin_ia32_gatherdiv16si((__v8si)_mm256_undefined_si256(), \
-                                        (void const *)(addr), \
-                                        (__v8di)(__m512i)(index), \
-                                        (__mmask8)-1, (int)(scale))
-
-#define _mm512_mask_i64gather_epi32(v1_old, mask, index, addr, scale) \
-  (__m256i)__builtin_ia32_gatherdiv16si((__v8si)(__m256i)(v1_old), \
-                                        (void const *)(addr), \
-                                        (__v8di)(__m512i)(index), \
-                                        (__mmask8)(mask), (int)(scale))
-
-#define _mm512_i64gather_pd(index, addr, scale) \
-  (__m512d)__builtin_ia32_gatherdiv8df((__v8df)_mm512_undefined_pd(), \
-                                       (void const *)(addr), \
-                                       (__v8di)(__m512i)(index), (__mmask8)-1, \
-                                       (int)(scale))
-
-#define _mm512_mask_i64gather_pd(v1_old, mask, index, addr, scale) \
-  (__m512d)__builtin_ia32_gatherdiv8df((__v8df)(__m512d)(v1_old), \
-                                       (void const *)(addr), \
-                                       (__v8di)(__m512i)(index), \
-                                       (__mmask8)(mask), (int)(scale))
-
-#define _mm512_i64gather_epi64(index, addr, scale) \
-  (__m512i)__builtin_ia32_gatherdiv8di((__v8di)_mm512_undefined_epi32(), \
-                                       (void const *)(addr), \
-                                       (__v8di)(__m512i)(index), (__mmask8)-1, \
-                                       (int)(scale))
-
-#define _mm512_mask_i64gather_epi64(v1_old, mask, index, addr, scale) \
-  (__m512i)__builtin_ia32_gatherdiv8di((__v8di)(__m512i)(v1_old), \
-                                       (void const *)(addr), \
-                                       (__v8di)(__m512i)(index), \
-                                       (__mmask8)(mask), (int)(scale))
-
-#define _mm512_i32gather_ps(index, addr, scale) \
-  (__m512)__builtin_ia32_gathersiv16sf((__v16sf)_mm512_undefined_ps(), \
-                                       (void const *)(addr), \
-                                       (__v16sf)(__m512)(index), \
-                                       (__mmask16)-1, (int)(scale))
-
-#define _mm512_mask_i32gather_ps(v1_old, mask, index, addr, scale) \
-  (__m512)__builtin_ia32_gathersiv16sf((__v16sf)(__m512)(v1_old), \
-                                       (void const *)(addr), \
-                                       (__v16sf)(__m512)(index), \
-                                       (__mmask16)(mask), (int)(scale))
-
-#define _mm512_i32gather_epi32(index, addr, scale) \
-  (__m512i)__builtin_ia32_gathersiv16si((__v16si)_mm512_undefined_epi32(), \
-                                        (void const *)(addr), \
-                                        (__v16si)(__m512i)(index), \
-                                        (__mmask16)-1, (int)(scale))
-
-#define _mm512_mask_i32gather_epi32(v1_old, mask, index, addr, scale) \
-  (__m512i)__builtin_ia32_gathersiv16si((__v16si)(__m512i)(v1_old), \
-                                        (void const *)(addr), \
-                                        (__v16si)(__m512i)(index), \
-                                        (__mmask16)(mask), (int)(scale))
-
-#define _mm512_i32gather_pd(index, addr, scale) \
-  (__m512d)__builtin_ia32_gathersiv8df((__v8df)_mm512_undefined_pd(), \
-                                       (void const *)(addr), \
-                                       (__v8si)(__m256i)(index), (__mmask8)-1, \
-                                       (int)(scale))
-
-#define _mm512_mask_i32gather_pd(v1_old, mask, index, addr, scale) \
-  (__m512d)__builtin_ia32_gathersiv8df((__v8df)(__m512d)(v1_old), \
-                                       (void const *)(addr), \
-                                       (__v8si)(__m256i)(index), \
-                                       (__mmask8)(mask), (int)(scale))
-
-#define _mm512_i32gather_epi64(index, addr, scale) \
-  (__m512i)__builtin_ia32_gathersiv8di((__v8di)_mm512_undefined_epi32(), \
-                                       (void const *)(addr), \
-                                       (__v8si)(__m256i)(index), (__mmask8)-1, \
-                                       (int)(scale))
-
-#define _mm512_mask_i32gather_epi64(v1_old, mask, index, addr, scale) \
-  (__m512i)__builtin_ia32_gathersiv8di((__v8di)(__m512i)(v1_old), \
-                                       (void const *)(addr), \
-                                       (__v8si)(__m256i)(index), \
-                                       (__mmask8)(mask), (int)(scale))
-
-#define _mm512_i64scatter_ps(addr, index, v1, scale) \
-  __builtin_ia32_scatterdiv16sf((void *)(addr), (__mmask8)-1, \
-                                (__v8di)(__m512i)(index), \
-                                (__v8sf)(__m256)(v1), (int)(scale))
-
-#define _mm512_mask_i64scatter_ps(addr, mask, index, v1, scale) \
-  __builtin_ia32_scatterdiv16sf((void *)(addr), (__mmask8)(mask), \
-                                (__v8di)(__m512i)(index), \
-                                (__v8sf)(__m256)(v1), (int)(scale))
-
-#define _mm512_i64scatter_epi32(addr, index, v1, scale) \
-  __builtin_ia32_scatterdiv16si((void *)(addr), (__mmask8)-1, \
-                                (__v8di)(__m512i)(index), \
-                                (__v8si)(__m256i)(v1), (int)(scale))
-
-#define _mm512_mask_i64scatter_epi32(addr, mask, index, v1, scale) \
-  __builtin_ia32_scatterdiv16si((void *)(addr), (__mmask8)(mask), \
-                                (__v8di)(__m512i)(index), \
-                                (__v8si)(__m256i)(v1), (int)(scale))
-
-#define _mm512_i64scatter_pd(addr, index, v1, scale) \
-  __builtin_ia32_scatterdiv8df((void *)(addr), (__mmask8)-1, \
-                               (__v8di)(__m512i)(index), \
-                               (__v8df)(__m512d)(v1), (int)(scale))
-
-#define _mm512_mask_i64scatter_pd(addr, mask, index, v1, scale) \
-  __builtin_ia32_scatterdiv8df((void *)(addr), (__mmask8)(mask), \
-                               (__v8di)(__m512i)(index), \
-                               (__v8df)(__m512d)(v1), (int)(scale))
-
-#define _mm512_i64scatter_epi64(addr, index, v1, scale) \
-  __builtin_ia32_scatterdiv8di((void *)(addr), (__mmask8)-1, \
-                               (__v8di)(__m512i)(index), \
-                               (__v8di)(__m512i)(v1), (int)(scale))
-
-#define _mm512_mask_i64scatter_epi64(addr, mask, index, v1, scale) \
-  __builtin_ia32_scatterdiv8di((void *)(addr), (__mmask8)(mask), \
-                               (__v8di)(__m512i)(index), \
-                               (__v8di)(__m512i)(v1), (int)(scale))
-
-#define _mm512_i32scatter_ps(addr, index, v1, scale) \
-  __builtin_ia32_scattersiv16sf((void *)(addr), (__mmask16)-1, \
-                                (__v16si)(__m512i)(index), \
-                                (__v16sf)(__m512)(v1), (int)(scale))
-
-#define _mm512_mask_i32scatter_ps(addr, mask, index, v1, scale) \
-  __builtin_ia32_scattersiv16sf((void *)(addr), (__mmask16)(mask), \
-                                (__v16si)(__m512i)(index), \
-                                (__v16sf)(__m512)(v1), (int)(scale))
-
-#define _mm512_i32scatter_epi32(addr, index, v1, scale) \
-  __builtin_ia32_scattersiv16si((void *)(addr), (__mmask16)-1, \
-                                (__v16si)(__m512i)(index), \
-                                (__v16si)(__m512i)(v1), (int)(scale))
-
-#define _mm512_mask_i32scatter_epi32(addr, mask, index, v1, scale) \
-  __builtin_ia32_scattersiv16si((void *)(addr), (__mmask16)(mask), \
-                                (__v16si)(__m512i)(index), \
-                                (__v16si)(__m512i)(v1), (int)(scale))
-
-#define _mm512_i32scatter_pd(addr, index, v1, scale) \
-  __builtin_ia32_scattersiv8df((void *)(addr), (__mmask8)-1, \
-                               (__v8si)(__m256i)(index), \
-                               (__v8df)(__m512d)(v1), (int)(scale))
-
-#define _mm512_mask_i32scatter_pd(addr, mask, index, v1, scale) \
-  __builtin_ia32_scattersiv8df((void *)(addr), (__mmask8)(mask), \
-                               (__v8si)(__m256i)(index), \
-                               (__v8df)(__m512d)(v1), (int)(scale))
-
-#define _mm512_i32scatter_epi64(addr, index, v1, scale) \
-  __builtin_ia32_scattersiv8di((void *)(addr), (__mmask8)-1, \
-                               (__v8si)(__m256i)(index), \
-                               (__v8di)(__m512i)(v1), (int)(scale))
-
-#define _mm512_mask_i32scatter_epi64(addr, mask, index, v1, scale) \
-  __builtin_ia32_scattersiv8di((void *)(addr), (__mmask8)(mask), \
-                               (__v8si)(__m256i)(index), \
-                               (__v8di)(__m512i)(v1), (int)(scale))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_fmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
-  return __builtin_ia32_vfmaddss3_mask((__v4sf)__W,
-                                       (__v4sf)__A,
-                                       (__v4sf)__B,
-                                       (__mmask8)__U,
-                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fmadd_round_ss(A, B, C, R) \
-  (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
-                                        (__v4sf)(__m128)(B), \
-                                        (__v4sf)(__m128)(C), (__mmask8)-1, \
-                                        (int)(R))
-
-#define _mm_mask_fmadd_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
-                                        (__v4sf)(__m128)(A), \
-                                        (__v4sf)(__m128)(B), (__mmask8)(U), \
-                                        (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_fmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
-{
-  return __builtin_ia32_vfmaddss3_maskz((__v4sf)__A,
-                                        (__v4sf)__B,
-                                        (__v4sf)__C,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fmadd_round_ss(U, A, B, C, R) \
-  (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
-                                         (__v4sf)(__m128)(B), \
-                                         (__v4sf)(__m128)(C), (__mmask8)(U), \
-                                         (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask3_fmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
-{
-  return __builtin_ia32_vfmaddss3_mask3((__v4sf)__W,
-                                        (__v4sf)__X,
-                                        (__v4sf)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fmadd_round_ss(W, X, Y, U, R) \
-  (__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)(__m128)(W), \
-                                         (__v4sf)(__m128)(X), \
-                                         (__v4sf)(__m128)(Y), (__mmask8)(U), \
-                                         (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_fmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
-  return __builtin_ia32_vfmaddss3_mask((__v4sf)__W,
-                                       (__v4sf)__A,
-                                       -(__v4sf)__B,
-                                       (__mmask8)__U,
-                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fmsub_round_ss(A, B, C, R) \
-  (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
-                                        (__v4sf)(__m128)(B), \
-                                        -(__v4sf)(__m128)(C), (__mmask8)-1, \
-                                        (int)(R))
-
-#define _mm_mask_fmsub_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
-                                        (__v4sf)(__m128)(A), \
-                                        -(__v4sf)(__m128)(B), (__mmask8)(U), \
-                                        (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_fmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
-{
-  return __builtin_ia32_vfmaddss3_maskz((__v4sf)__A,
-                                        (__v4sf)__B,
-                                        -(__v4sf)__C,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fmsub_round_ss(U, A, B, C, R) \
-  (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
-                                         (__v4sf)(__m128)(B), \
-                                         -(__v4sf)(__m128)(C), (__mmask8)(U), \
-                                         (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask3_fmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
-{
-  return __builtin_ia32_vfmsubss3_mask3((__v4sf)__W,
-                                        (__v4sf)__X,
-                                        (__v4sf)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fmsub_round_ss(W, X, Y, U, R) \
-  (__m128)__builtin_ia32_vfmsubss3_mask3((__v4sf)(__m128)(W), \
-                                         (__v4sf)(__m128)(X), \
-                                         (__v4sf)(__m128)(Y), (__mmask8)(U), \
-                                         (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_fnmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
-  return __builtin_ia32_vfmaddss3_mask((__v4sf)__W,
-                                       -(__v4sf)__A,
-                                       (__v4sf)__B,
-                                       (__mmask8)__U,
-                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fnmadd_round_ss(A, B, C, R) \
-  (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
-                                        -(__v4sf)(__m128)(B), \
-                                        (__v4sf)(__m128)(C), (__mmask8)-1, \
-                                        (int)(R))
-
-#define _mm_mask_fnmadd_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
-                                        -(__v4sf)(__m128)(A), \
-                                        (__v4sf)(__m128)(B), (__mmask8)(U), \
-                                        (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_fnmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
-{
-  return __builtin_ia32_vfmaddss3_maskz((__v4sf)__A,
-                                        -(__v4sf)__B,
-                                        (__v4sf)__C,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fnmadd_round_ss(U, A, B, C, R) \
-  (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
-                                         -(__v4sf)(__m128)(B), \
-                                         (__v4sf)(__m128)(C), (__mmask8)(U), \
-                                         (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask3_fnmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
-{
-  return __builtin_ia32_vfmaddss3_mask3((__v4sf)__W,
-                                        -(__v4sf)__X,
-                                        (__v4sf)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fnmadd_round_ss(W, X, Y, U, R) \
-  (__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)(__m128)(W), \
-                                         -(__v4sf)(__m128)(X), \
-                                         (__v4sf)(__m128)(Y), (__mmask8)(U), \
-                                         (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_fnmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
-  return __builtin_ia32_vfmaddss3_mask((__v4sf)__W,
-                                       -(__v4sf)__A,
-                                       -(__v4sf)__B,
-                                       (__mmask8)__U,
-                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fnmsub_round_ss(A, B, C, R) \
-  (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
-                                        -(__v4sf)(__m128)(B), \
-                                        -(__v4sf)(__m128)(C), (__mmask8)-1, \
-                                        (int)(R))
-
-#define _mm_mask_fnmsub_round_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
-                                        -(__v4sf)(__m128)(A), \
-                                        -(__v4sf)(__m128)(B), (__mmask8)(U), \
-                                        (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_fnmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
-{
-  return __builtin_ia32_vfmaddss3_maskz((__v4sf)__A,
-                                        -(__v4sf)__B,
-                                        -(__v4sf)__C,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fnmsub_round_ss(U, A, B, C, R) \
-  (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
-                                         -(__v4sf)(__m128)(B), \
-                                         -(__v4sf)(__m128)(C), (__mmask8)(U), \
-                                         (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask3_fnmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
-{
-  return __builtin_ia32_vfmsubss3_mask3((__v4sf)__W,
-                                        -(__v4sf)__X,
-                                        (__v4sf)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fnmsub_round_ss(W, X, Y, U, R) \
-  (__m128)__builtin_ia32_vfmsubss3_mask3((__v4sf)(__m128)(W), \
-                                         -(__v4sf)(__m128)(X), \
-                                         (__v4sf)(__m128)(Y), (__mmask8)(U), \
-                                         (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_fmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
-  return __builtin_ia32_vfmaddsd3_mask((__v2df)__W,
-                                       (__v2df)__A,
-                                       (__v2df)__B,
-                                       (__mmask8)__U,
-                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fmadd_round_sd(A, B, C, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
-                                         (__v2df)(__m128d)(B), \
-                                         (__v2df)(__m128d)(C), (__mmask8)-1, \
-                                         (int)(R))
-
-#define _mm_mask_fmadd_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
-                                         (__v2df)(__m128d)(A), \
-                                         (__v2df)(__m128d)(B), (__mmask8)(U), \
-                                         (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_fmadd_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
-{
-  return __builtin_ia32_vfmaddsd3_maskz((__v2df)__A,
-                                        (__v2df)__B,
-                                        (__v2df)__C,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fmadd_round_sd(U, A, B, C, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
-                                          (__v2df)(__m128d)(B), \
-                                          (__v2df)(__m128d)(C), (__mmask8)(U), \
-                                          (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask3_fmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
-{
-  return __builtin_ia32_vfmaddsd3_mask3((__v2df)__W,
-                                        (__v2df)__X,
-                                        (__v2df)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fmadd_round_sd(W, X, Y, U, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)(__m128d)(W), \
-                                          (__v2df)(__m128d)(X), \
-                                          (__v2df)(__m128d)(Y), (__mmask8)(U), \
-                                          (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_fmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
-  return __builtin_ia32_vfmaddsd3_mask((__v2df)__W,
-                                       (__v2df)__A,
-                                       -(__v2df)__B,
-                                       (__mmask8)__U,
-                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fmsub_round_sd(A, B, C, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
-                                         (__v2df)(__m128d)(B), \
-                                         -(__v2df)(__m128d)(C), (__mmask8)-1, \
-                                         (int)(R))
-
-#define _mm_mask_fmsub_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
-                                         (__v2df)(__m128d)(A), \
-                                         -(__v2df)(__m128d)(B), (__mmask8)(U), \
-                                         (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_fmsub_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
-{
-  return __builtin_ia32_vfmaddsd3_maskz((__v2df)__A,
-                                        (__v2df)__B,
-                                        -(__v2df)__C,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fmsub_round_sd(U, A, B, C, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
-                                          (__v2df)(__m128d)(B), \
-                                          -(__v2df)(__m128d)(C), \
-                                          (__mmask8)(U), (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask3_fmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
-{
-  return __builtin_ia32_vfmsubsd3_mask3((__v2df)__W,
-                                        (__v2df)__X,
-                                        (__v2df)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fmsub_round_sd(W, X, Y, U, R) \
-  (__m128d)__builtin_ia32_vfmsubsd3_mask3((__v2df)(__m128d)(W), \
-                                          (__v2df)(__m128d)(X), \
-                                          (__v2df)(__m128d)(Y), \
-                                          (__mmask8)(U), (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_fnmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
-  return __builtin_ia32_vfmaddsd3_mask((__v2df)__W,
-                                       -(__v2df)__A,
-                                       (__v2df)__B,
-                                       (__mmask8)__U,
-                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fnmadd_round_sd(A, B, C, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
-                                         -(__v2df)(__m128d)(B), \
-                                         (__v2df)(__m128d)(C), (__mmask8)-1, \
-                                         (int)(R))
-
-#define _mm_mask_fnmadd_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
-                                         -(__v2df)(__m128d)(A), \
-                                         (__v2df)(__m128d)(B), (__mmask8)(U), \
-                                         (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_fnmadd_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
-{
-  return __builtin_ia32_vfmaddsd3_maskz((__v2df)__A,
-                                        -(__v2df)__B,
-                                        (__v2df)__C,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fnmadd_round_sd(U, A, B, C, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
-                                          -(__v2df)(__m128d)(B), \
-                                          (__v2df)(__m128d)(C), (__mmask8)(U), \
-                                          (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask3_fnmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
-{
-  return __builtin_ia32_vfmaddsd3_mask3((__v2df)__W,
-                                        -(__v2df)__X,
-                                        (__v2df)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fnmadd_round_sd(W, X, Y, U, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)(__m128d)(W), \
-                                          -(__v2df)(__m128d)(X), \
-                                          (__v2df)(__m128d)(Y), (__mmask8)(U), \
-                                          (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_fnmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
-  return __builtin_ia32_vfmaddsd3_mask((__v2df)__W,
-                                       -(__v2df)__A,
-                                       -(__v2df)__B,
-                                       (__mmask8)__U,
-                                       _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_fnmsub_round_sd(A, B, C, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
-                                         -(__v2df)(__m128d)(B), \
-                                         -(__v2df)(__m128d)(C), (__mmask8)-1, \
-                                         (int)(R))
-
-#define _mm_mask_fnmsub_round_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
-                                         -(__v2df)(__m128d)(A), \
-                                         -(__v2df)(__m128d)(B), (__mmask8)(U), \
-                                         (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_fnmsub_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
-{
-  return __builtin_ia32_vfmaddsd3_maskz((__v2df)__A,
-                                        -(__v2df)__B,
-                                        -(__v2df)__C,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_maskz_fnmsub_round_sd(U, A, B, C, R) \
-  (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
-                                          -(__v2df)(__m128d)(B), \
-                                          -(__v2df)(__m128d)(C), \
-                                          (__mmask8)(U), \
-                                          (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask3_fnmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
-{
-  return __builtin_ia32_vfmsubsd3_mask3((__v2df)__W,
-                                        -(__v2df)__X,
-                                        (__v2df)__Y,
-                                        (__mmask8)__U,
-                                        _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_mask3_fnmsub_round_sd(W, X, Y, U, R) \
-  (__m128d)__builtin_ia32_vfmsubsd3_mask3((__v2df)(__m128d)(W), \
-                                          -(__v2df)(__m128d)(X), \
-                                          (__v2df)(__m128d)(Y), \
-                                          (__mmask8)(U), (int)(R))
-
-#define _mm512_permutex_pd(X, C) \
-  (__m512d)__builtin_ia32_permdf512((__v8df)(__m512d)(X), (int)(C))
-
-#define _mm512_mask_permutex_pd(W, U, X, C) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                       (__v8df)_mm512_permutex_pd((X), (C)), \
-                                       (__v8df)(__m512d)(W))
-
-#define _mm512_maskz_permutex_pd(U, X, C) \
-  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
-                                       (__v8df)_mm512_permutex_pd((X), (C)), \
-                                       (__v8df)_mm512_setzero_pd())
-
-#define _mm512_permutex_epi64(X, C) \
-  (__m512i)__builtin_ia32_permdi512((__v8di)(__m512i)(X), (int)(C))
-
-#define _mm512_mask_permutex_epi64(W, U, X, C) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                      (__v8di)_mm512_permutex_epi64((X), (C)), \
-                                      (__v8di)(__m512i)(W))
-
-#define _mm512_maskz_permutex_epi64(U, X, C) \
-  (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
-                                      (__v8di)_mm512_permutex_epi64((X), (C)), \
-                                      (__v8di)_mm512_setzero_si512())
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_permutexvar_pd (__m512i __X, __m512d __Y)
-{
-  return (__m512d)__builtin_ia32_permvardf512((__v8df) __Y, (__v8di) __X);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_permutexvar_pd (__m512d __W, __mmask8 __U, __m512i __X, __m512d __Y)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                        (__v8df)_mm512_permutexvar_pd(__X, __Y),
-                                        (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutexvar_pd (__mmask8 __U, __m512i __X, __m512d __Y)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                        (__v8df)_mm512_permutexvar_pd(__X, __Y),
-                                        (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_permutexvar_epi64 (__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_permvardi512((__v8di)__Y, (__v8di)__X);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutexvar_epi64 (__mmask8 __M, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                     (__v8di)_mm512_permutexvar_epi64(__X, __Y),
-                                     (__v8di)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_permutexvar_epi64 (__m512i __W, __mmask8 __M, __m512i __X,
-             __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
-                                     (__v8di)_mm512_permutexvar_epi64(__X, __Y),
-                                     (__v8di)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_permutexvar_ps (__m512i __X, __m512 __Y)
-{
-  return (__m512)__builtin_ia32_permvarsf512((__v16sf)__Y, (__v16si)__X);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_permutexvar_ps (__m512 __W, __mmask16 __U, __m512i __X, __m512 __Y)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                       (__v16sf)_mm512_permutexvar_ps(__X, __Y),
-                                       (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutexvar_ps (__mmask16 __U, __m512i __X, __m512 __Y)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                       (__v16sf)_mm512_permutexvar_ps(__X, __Y),
-                                       (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_permutexvar_epi32 (__m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_permvarsi512((__v16si)__Y, (__v16si)__X);
-}
-
-#define _mm512_permutevar_epi32 _mm512_permutexvar_epi32
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_permutexvar_epi32 (__mmask16 __M, __m512i __X, __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                    (__v16si)_mm512_permutexvar_epi32(__X, __Y),
-                                    (__v16si)_mm512_setzero_si512());
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_permutexvar_epi32 (__m512i __W, __mmask16 __M, __m512i __X,
-             __m512i __Y)
-{
-  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
-                                    (__v16si)_mm512_permutexvar_epi32(__X, __Y),
-                                    (__v16si)__W);
-}
-
-#define _mm512_mask_permutevar_epi32 _mm512_mask_permutexvar_epi32
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_kand (__mmask16 __A, __mmask16 __B)
-{
-  return (__mmask16) __builtin_ia32_kandhi ((__mmask16) __A, (__mmask16) __B);
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_kandn (__mmask16 __A, __mmask16 __B)
-{
-  return (__mmask16) __builtin_ia32_kandnhi ((__mmask16) __A, (__mmask16) __B);
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_kor (__mmask16 __A, __mmask16 __B)
-{
-  return (__mmask16) __builtin_ia32_korhi ((__mmask16) __A, (__mmask16) __B);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm512_kortestc (__mmask16 __A, __mmask16 __B)
-{
-  return __builtin_ia32_kortestchi ((__mmask16) __A, (__mmask16) __B);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm512_kortestz (__mmask16 __A, __mmask16 __B)
-{
-  return __builtin_ia32_kortestzhi ((__mmask16) __A, (__mmask16) __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_kortestc_mask16_u8(__mmask16 __A, __mmask16 __B)
-{
-  return (unsigned char)__builtin_ia32_kortestchi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_kortestz_mask16_u8(__mmask16 __A, __mmask16 __B)
-{
-  return (unsigned char)__builtin_ia32_kortestzhi(__A, __B);
-}
-
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_kortest_mask16_u8(__mmask16 __A, __mmask16 __B, unsigned char *__C) {
-  *__C = (unsigned char)__builtin_ia32_kortestchi(__A, __B);
-  return (unsigned char)__builtin_ia32_kortestzhi(__A, __B);
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_kunpackb (__mmask16 __A, __mmask16 __B)
-{
-  return (__mmask16) __builtin_ia32_kunpckhi ((__mmask16) __A, (__mmask16) __B);
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_kxnor (__mmask16 __A, __mmask16 __B)
-{
-  return (__mmask16) __builtin_ia32_kxnorhi ((__mmask16) __A, (__mmask16) __B);
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_mm512_kxor (__mmask16 __A, __mmask16 __B)
-{
-  return (__mmask16) __builtin_ia32_kxorhi ((__mmask16) __A, (__mmask16) __B);
-}
-
-#define _kand_mask16 _mm512_kand
-#define _kandn_mask16 _mm512_kandn
-#define _knot_mask16 _mm512_knot
-#define _kor_mask16 _mm512_kor
-#define _kxnor_mask16 _mm512_kxnor
-#define _kxor_mask16 _mm512_kxor
-
-#define _kshiftli_mask16(A, I) \
-  (__mmask16)__builtin_ia32_kshiftlihi((__mmask16)(A), (unsigned int)(I))
-
-#define _kshiftri_mask16(A, I) \
-  (__mmask16)__builtin_ia32_kshiftrihi((__mmask16)(A), (unsigned int)(I))
-
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-_cvtmask16_u32(__mmask16 __A) {
-  return (unsigned int)__builtin_ia32_kmovw((__mmask16)__A);
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_cvtu32_mask16(unsigned int __A) {
-  return (__mmask16)__builtin_ia32_kmovw((__mmask16)__A);
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS
-_load_mask16(__mmask16 *__A) {
-  return (__mmask16)__builtin_ia32_kmovw(*(__mmask16 *)__A);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS
-_store_mask16(__mmask16 *__A, __mmask16 __B) {
-  *(__mmask16 *)__A = __builtin_ia32_kmovw((__mmask16)__B);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_stream_si512 (void * __P, __m512i __A)
-{
-  typedef __v8di __v8di_aligned __attribute__((aligned(64)));
-  __builtin_nontemporal_store((__v8di_aligned)__A, (__v8di_aligned*)__P);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_stream_load_si512 (void const *__P)
-{
-  typedef __v8di __v8di_aligned __attribute__((aligned(64)));
-  return (__m512i) __builtin_nontemporal_load((const __v8di_aligned *)__P);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_stream_pd (void *__P, __m512d __A)
-{
-  typedef __v8df __v8df_aligned __attribute__((aligned(64)));
-  __builtin_nontemporal_store((__v8df_aligned)__A, (__v8df_aligned*)__P);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_stream_ps (void *__P, __m512 __A)
-{
-  typedef __v16sf __v16sf_aligned __attribute__((aligned(64)));
-  __builtin_nontemporal_store((__v16sf_aligned)__A, (__v16sf_aligned*)__P);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_compress_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_compressdf512_mask ((__v8df) __A,
-                  (__v8df) __W,
-                  (__mmask8) __U);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_compress_pd (__mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_compressdf512_mask ((__v8df) __A,
-                  (__v8df)
-                  _mm512_setzero_pd (),
-                  (__mmask8) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_compress_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_compressdi512_mask ((__v8di) __A,
-                  (__v8di) __W,
-                  (__mmask8) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_compress_epi64 (__mmask8 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_compressdi512_mask ((__v8di) __A,
-                  (__v8di)
-                  _mm512_setzero_si512 (),
-                  (__mmask8) __U);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_compress_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_compresssf512_mask ((__v16sf) __A,
-                 (__v16sf) __W,
-                 (__mmask16) __U);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_compress_ps (__mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_compresssf512_mask ((__v16sf) __A,
-                 (__v16sf)
-                 _mm512_setzero_ps (),
-                 (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_compress_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_compresssi512_mask ((__v16si) __A,
-                  (__v16si) __W,
-                  (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_compress_epi32 (__mmask16 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_compresssi512_mask ((__v16si) __A,
-                  (__v16si)
-                  _mm512_setzero_si512 (),
-                  (__mmask16) __U);
-}
-
-#define _mm_cmp_round_ss_mask(X, Y, P, R) \
-  (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
-                                      (__v4sf)(__m128)(Y), (int)(P), \
-                                      (__mmask8)-1, (int)(R))
-
-#define _mm_mask_cmp_round_ss_mask(M, X, Y, P, R) \
-  (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
-                                      (__v4sf)(__m128)(Y), (int)(P), \
-                                      (__mmask8)(M), (int)(R))
-
-#define _mm_cmp_ss_mask(X, Y, P) \
-  (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
-                                      (__v4sf)(__m128)(Y), (int)(P), \
-                                      (__mmask8)-1, \
-                                      _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_cmp_ss_mask(M, X, Y, P) \
-  (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
-                                      (__v4sf)(__m128)(Y), (int)(P), \
-                                      (__mmask8)(M), \
-                                      _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_cmp_round_sd_mask(X, Y, P, R) \
-  (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
-                                      (__v2df)(__m128d)(Y), (int)(P), \
-                                      (__mmask8)-1, (int)(R))
-
-#define _mm_mask_cmp_round_sd_mask(M, X, Y, P, R) \
-  (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
-                                      (__v2df)(__m128d)(Y), (int)(P), \
-                                      (__mmask8)(M), (int)(R))
-
-#define _mm_cmp_sd_mask(X, Y, P) \
-  (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
-                                      (__v2df)(__m128d)(Y), (int)(P), \
-                                      (__mmask8)-1, \
-                                      _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_cmp_sd_mask(M, X, Y, P) \
-  (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
-                                      (__v2df)(__m128d)(Y), (int)(P), \
-                                      (__mmask8)(M), \
-                                      _MM_FROUND_CUR_DIRECTION)
-
-/* Bit Test */
-
-static __inline __mmask16 __DEFAULT_FN_ATTRS512
-_mm512_test_epi32_mask (__m512i __A, __m512i __B)
-{
-  return _mm512_cmpneq_epi32_mask (_mm512_and_epi32(__A, __B),
-                                   _mm512_setzero_si512());
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS512
-_mm512_mask_test_epi32_mask (__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return _mm512_mask_cmpneq_epi32_mask (__U, _mm512_and_epi32 (__A, __B),
-                                        _mm512_setzero_si512());
-}
-
-static __inline __mmask8 __DEFAULT_FN_ATTRS512
-_mm512_test_epi64_mask (__m512i __A, __m512i __B)
-{
-  return _mm512_cmpneq_epi64_mask (_mm512_and_epi32 (__A, __B),
-                                   _mm512_setzero_si512());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS512
-_mm512_mask_test_epi64_mask (__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return _mm512_mask_cmpneq_epi64_mask (__U, _mm512_and_epi32 (__A, __B),
-                                        _mm512_setzero_si512());
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS512
-_mm512_testn_epi32_mask (__m512i __A, __m512i __B)
-{
-  return _mm512_cmpeq_epi32_mask (_mm512_and_epi32 (__A, __B),
-                                  _mm512_setzero_si512());
-}
-
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS512
-_mm512_mask_testn_epi32_mask (__mmask16 __U, __m512i __A, __m512i __B)
-{
-  return _mm512_mask_cmpeq_epi32_mask (__U, _mm512_and_epi32 (__A, __B),
-                                       _mm512_setzero_si512());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS512
-_mm512_testn_epi64_mask (__m512i __A, __m512i __B)
-{
-  return _mm512_cmpeq_epi64_mask (_mm512_and_epi32 (__A, __B),
-                                  _mm512_setzero_si512());
-}
-
-static __inline__ __mmask8 __DEFAULT_FN_ATTRS512
-_mm512_mask_testn_epi64_mask (__mmask8 __U, __m512i __A, __m512i __B)
-{
-  return _mm512_mask_cmpeq_epi64_mask (__U, _mm512_and_epi32 (__A, __B),
-                                       _mm512_setzero_si512());
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_movehdup_ps (__m512 __A)
-{
-  return (__m512)__builtin_shufflevector((__v16sf)__A, (__v16sf)__A,
-                         1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_movehdup_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_movehdup_ps(__A),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_movehdup_ps (__mmask16 __U, __m512 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_movehdup_ps(__A),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_moveldup_ps (__m512 __A)
-{
-  return (__m512)__builtin_shufflevector((__v16sf)__A, (__v16sf)__A,
-                         0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_moveldup_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_moveldup_ps(__A),
-                                             (__v16sf)__W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_moveldup_ps (__mmask16 __U, __m512 __A)
-{
-  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
-                                             (__v16sf)_mm512_moveldup_ps(__A),
-                                             (__v16sf)_mm512_setzero_ps());
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_move_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
-{
-  return __builtin_ia32_selectss_128(__U, _mm_move_ss(__A, __B), __W);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_move_ss (__mmask8 __U, __m128 __A, __m128 __B)
-{
-  return __builtin_ia32_selectss_128(__U, _mm_move_ss(__A, __B),
-                                     _mm_setzero_ps());
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_move_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
-{
-  return __builtin_ia32_selectsd_128(__U, _mm_move_sd(__A, __B), __W);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_move_sd (__mmask8 __U, __m128d __A, __m128d __B)
-{
-  return __builtin_ia32_selectsd_128(__U, _mm_move_sd(__A, __B),
-                                     _mm_setzero_pd());
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_store_ss (float * __W, __mmask8 __U, __m128 __A)
-{
-  __builtin_ia32_storess128_mask ((__v4sf *)__W, __A, __U & 1);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_store_sd (double * __W, __mmask8 __U, __m128d __A)
-{
-  __builtin_ia32_storesd128_mask ((__v2df *)__W, __A, __U & 1);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_load_ss (__m128 __W, __mmask8 __U, const float* __A)
-{
-  __m128 src = (__v4sf) __builtin_shufflevector((__v4sf) __W,
-                                                (__v4sf)_mm_setzero_ps(),
-                                                0, 4, 4, 4);
-
-  return (__m128) __builtin_ia32_loadss128_mask ((__v4sf *) __A, src, __U & 1);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_load_ss (__mmask8 __U, const float* __A)
-{
-  return (__m128)__builtin_ia32_loadss128_mask ((__v4sf *) __A,
-                                                (__v4sf) _mm_setzero_ps(),
-                                                __U & 1);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_load_sd (__m128d __W, __mmask8 __U, const double* __A)
-{
-  __m128d src = (__v2df) __builtin_shufflevector((__v2df) __W,
-                                                 (__v2df)_mm_setzero_pd(),
-                                                 0, 2);
-
-  return (__m128d) __builtin_ia32_loadsd128_mask ((__v2df *) __A, src, __U & 1);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_load_sd (__mmask8 __U, const double* __A)
-{
-  return (__m128d) __builtin_ia32_loadsd128_mask ((__v2df *) __A,
-                                                  (__v2df) _mm_setzero_pd(),
-                                                  __U & 1);
-}
-
-#define _mm512_shuffle_epi32(A, I) \
-  (__m512i)__builtin_ia32_pshufd512((__v16si)(__m512i)(A), (int)(I))
-
-#define _mm512_mask_shuffle_epi32(W, U, A, I) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                      (__v16si)_mm512_shuffle_epi32((A), (I)), \
-                                      (__v16si)(__m512i)(W))
-
-#define _mm512_maskz_shuffle_epi32(U, A, I) \
-  (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
-                                      (__v16si)_mm512_shuffle_epi32((A), (I)), \
-                                      (__v16si)_mm512_setzero_si512())
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_expand_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_expanddf512_mask ((__v8df) __A,
-                (__v8df) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_expand_pd (__mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_expanddf512_mask ((__v8df) __A,
-                (__v8df) _mm512_setzero_pd (),
-                (__mmask8) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_expand_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_expanddi512_mask ((__v8di) __A,
-                (__v8di) __W,
-                (__mmask8) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_expand_epi64 ( __mmask8 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_expanddi512_mask ((__v8di) __A,
-                (__v8di) _mm512_setzero_si512 (),
-                (__mmask8) __U);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_expandloadu_pd(__m512d __W, __mmask8 __U, void const *__P)
-{
-  return (__m512d) __builtin_ia32_expandloaddf512_mask ((const __v8df *)__P,
-              (__v8df) __W,
-              (__mmask8) __U);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_expandloadu_pd(__mmask8 __U, void const *__P)
-{
-  return (__m512d) __builtin_ia32_expandloaddf512_mask ((const __v8df *)__P,
-              (__v8df) _mm512_setzero_pd(),
-              (__mmask8) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_expandloadu_epi64(__m512i __W, __mmask8 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_expandloaddi512_mask ((const __v8di *)__P,
-              (__v8di) __W,
-              (__mmask8) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_expandloadu_epi64(__mmask8 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_expandloaddi512_mask ((const __v8di *)__P,
-              (__v8di) _mm512_setzero_si512(),
-              (__mmask8) __U);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_expandloadu_ps(__m512 __W, __mmask16 __U, void const *__P)
-{
-  return (__m512) __builtin_ia32_expandloadsf512_mask ((const __v16sf *)__P,
-                   (__v16sf) __W,
-                   (__mmask16) __U);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_expandloadu_ps(__mmask16 __U, void const *__P)
-{
-  return (__m512) __builtin_ia32_expandloadsf512_mask ((const __v16sf *)__P,
-                   (__v16sf) _mm512_setzero_ps(),
-                   (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_expandloadu_epi32(__m512i __W, __mmask16 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_expandloadsi512_mask ((const __v16si *)__P,
-              (__v16si) __W,
-              (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_expandloadu_epi32(__mmask16 __U, void const *__P)
-{
-  return (__m512i) __builtin_ia32_expandloadsi512_mask ((const __v16si *)__P,
-              (__v16si) _mm512_setzero_si512(),
-              (__mmask16) __U);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_expand_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_expandsf512_mask ((__v16sf) __A,
-               (__v16sf) __W,
-               (__mmask16) __U);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_expand_ps (__mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_expandsf512_mask ((__v16sf) __A,
-               (__v16sf) _mm512_setzero_ps(),
-               (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_expand_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_expandsi512_mask ((__v16si) __A,
-                (__v16si) __W,
-                (__mmask16) __U);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_expand_epi32 (__mmask16 __U, __m512i __A)
-{
-  return (__m512i) __builtin_ia32_expandsi512_mask ((__v16si) __A,
-                (__v16si) _mm512_setzero_si512(),
-                (__mmask16) __U);
-}
-
-#define _mm512_cvt_roundps_pd(A, R) \
-  (__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
-                                           (__v8df)_mm512_undefined_pd(), \
-                                           (__mmask8)-1, (int)(R))
-
-#define _mm512_mask_cvt_roundps_pd(W, U, A, R) \
-  (__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
-                                           (__v8df)(__m512d)(W), \
-                                           (__mmask8)(U), (int)(R))
-
-#define _mm512_maskz_cvt_roundps_pd(U, A, R) \
-  (__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
-                                           (__v8df)_mm512_setzero_pd(), \
-                                           (__mmask8)(U), (int)(R))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_cvtps_pd (__m256 __A)
-{
-  return (__m512d) __builtin_convertvector((__v8sf)__A, __v8df);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtps_pd (__m512d __W, __mmask8 __U, __m256 __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_cvtps_pd(__A),
-                                              (__v8df)__W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_cvtps_pd (__mmask8 __U, __m256 __A)
-{
-  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
-                                              (__v8df)_mm512_cvtps_pd(__A),
-                                              (__v8df)_mm512_setzero_pd());
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_cvtpslo_pd (__m512 __A)
-{
-  return (__m512d) _mm512_cvtps_pd(_mm512_castps512_ps256(__A));
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_cvtpslo_pd (__m512d __W, __mmask8 __U, __m512 __A)
-{
-  return (__m512d) _mm512_mask_cvtps_pd(__W, __U, _mm512_castps512_ps256(__A));
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_mov_pd (__m512d __W, __mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U,
-              (__v8df) __A,
-              (__v8df) __W);
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_maskz_mov_pd (__mmask8 __U, __m512d __A)
-{
-  return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U,
-              (__v8df) __A,
-              (__v8df) _mm512_setzero_pd ());
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_mov_ps (__m512 __W, __mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U,
-             (__v16sf) __A,
-             (__v16sf) __W);
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_maskz_mov_ps (__mmask16 __U, __m512 __A)
-{
-  return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U,
-             (__v16sf) __A,
-             (__v16sf) _mm512_setzero_ps ());
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_compressstoreu_pd (void *__P, __mmask8 __U, __m512d __A)
-{
-  __builtin_ia32_compressstoredf512_mask ((__v8df *) __P, (__v8df) __A,
-            (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_compressstoreu_epi64 (void *__P, __mmask8 __U, __m512i __A)
-{
-  __builtin_ia32_compressstoredi512_mask ((__v8di *) __P, (__v8di) __A,
-            (__mmask8) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_compressstoreu_ps (void *__P, __mmask16 __U, __m512 __A)
-{
-  __builtin_ia32_compressstoresf512_mask ((__v16sf *) __P, (__v16sf) __A,
-            (__mmask16) __U);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS512
-_mm512_mask_compressstoreu_epi32 (void *__P, __mmask16 __U, __m512i __A)
-{
-  __builtin_ia32_compressstoresi512_mask ((__v16si *) __P, (__v16si) __A,
-            (__mmask16) __U);
-}
-
-#define _mm_cvt_roundsd_ss(A, B, R) \
-  (__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
-                                             (__v2df)(__m128d)(B), \
-                                             (__v4sf)_mm_undefined_ps(), \
-                                             (__mmask8)-1, (int)(R))
-
-#define _mm_mask_cvt_roundsd_ss(W, U, A, B, R) \
-  (__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
-                                             (__v2df)(__m128d)(B), \
-                                             (__v4sf)(__m128)(W), \
-                                             (__mmask8)(U), (int)(R))
-
-#define _mm_maskz_cvt_roundsd_ss(U, A, B, R) \
-  (__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
-                                             (__v2df)(__m128d)(B), \
-                                             (__v4sf)_mm_setzero_ps(), \
-                                             (__mmask8)(U), (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_mask_cvtsd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128d __B)
-{
-  return __builtin_ia32_cvtsd2ss_round_mask ((__v4sf)__A,
-                                             (__v2df)__B,
-                                             (__v4sf)__W,
-                                             (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtsd_ss (__mmask8 __U, __m128 __A, __m128d __B)
-{
-  return __builtin_ia32_cvtsd2ss_round_mask ((__v4sf)__A,
-                                             (__v2df)__B,
-                                             (__v4sf)_mm_setzero_ps(),
-                                             (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-#define _mm_cvtss_i32 _mm_cvtss_si32
-#define _mm_cvtsd_i32 _mm_cvtsd_si32
-#define _mm_cvti32_sd _mm_cvtsi32_sd
-#define _mm_cvti32_ss _mm_cvtsi32_ss
-#ifdef __x86_64__
-#define _mm_cvtss_i64 _mm_cvtss_si64
-#define _mm_cvtsd_i64 _mm_cvtsd_si64
-#define _mm_cvti64_sd _mm_cvtsi64_sd
-#define _mm_cvti64_ss _mm_cvtsi64_ss
-#endif
-
-#ifdef __x86_64__
-#define _mm_cvt_roundi64_sd(A, B, R) \
-  (__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \
-                                     (int)(R))
-
-#define _mm_cvt_roundsi64_sd(A, B, R) \
-  (__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \
-                                     (int)(R))
-#endif
-
-#define _mm_cvt_roundsi32_ss(A, B, R) \
-  (__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R))
-
-#define _mm_cvt_roundi32_ss(A, B, R) \
-  (__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R))
-
-#ifdef __x86_64__
-#define _mm_cvt_roundsi64_ss(A, B, R) \
-  (__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \
-                                    (int)(R))
-
-#define _mm_cvt_roundi64_ss(A, B, R) \
-  (__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \
-                                    (int)(R))
-#endif
-
-#define _mm_cvt_roundss_sd(A, B, R) \
-  (__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (__v2df)_mm_undefined_pd(), \
-                                              (__mmask8)-1, (int)(R))
-
-#define _mm_mask_cvt_roundss_sd(W, U, A, B, R) \
-  (__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (__v2df)(__m128d)(W), \
-                                              (__mmask8)(U), (int)(R))
-
-#define _mm_maskz_cvt_roundss_sd(U, A, B, R) \
-  (__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
-                                              (__v4sf)(__m128)(B), \
-                                              (__v2df)_mm_setzero_pd(), \
-                                              (__mmask8)(U), (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_mask_cvtss_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128 __B)
-{
-  return __builtin_ia32_cvtss2sd_round_mask((__v2df)__A,
-                                            (__v4sf)__B,
-                                            (__v2df)__W,
-                                            (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtss_sd (__mmask8 __U, __m128d __A, __m128 __B)
-{
-  return __builtin_ia32_cvtss2sd_round_mask((__v2df)__A,
-                                            (__v4sf)__B,
-                                            (__v2df)_mm_setzero_pd(),
-                                            (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
-}
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_cvtu32_sd (__m128d __A, unsigned __B)
-{
-  __A[0] = __B;
-  return __A;
-}
-
-#ifdef __x86_64__
-#define _mm_cvt_roundu64_sd(A, B, R) \
-  (__m128d)__builtin_ia32_cvtusi2sd64((__v2df)(__m128d)(A), \
-                                      (unsigned long long)(B), (int)(R))
-
-static __inline__ __m128d __DEFAULT_FN_ATTRS128
-_mm_cvtu64_sd (__m128d __A, unsigned long long __B)
-{
-  __A[0] = __B;
-  return __A;
-}
-#endif
-
-#define _mm_cvt_roundu32_ss(A, B, R) \
-  (__m128)__builtin_ia32_cvtusi2ss32((__v4sf)(__m128)(A), (unsigned int)(B), \
-                                     (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_cvtu32_ss (__m128 __A, unsigned __B)
-{
-  __A[0] = __B;
-  return __A;
-}
-
-#ifdef __x86_64__
-#define _mm_cvt_roundu64_ss(A, B, R) \
-  (__m128)__builtin_ia32_cvtusi2ss64((__v4sf)(__m128)(A), \
-                                     (unsigned long long)(B), (int)(R))
-
-static __inline__ __m128 __DEFAULT_FN_ATTRS128
-_mm_cvtu64_ss (__m128 __A, unsigned long long __B)
-{
-  __A[0] = __B;
-  return __A;
-}
-#endif
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_set1_epi32 (__m512i __O, __mmask16 __M, int __A)
-{
-  return (__m512i) __builtin_ia32_selectd_512(__M,
-                                              (__v16si) _mm512_set1_epi32(__A),
-                                              (__v16si) __O);
-}
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_set1_epi64 (__m512i __O, __mmask8 __M, long long __A)
-{
-  return (__m512i) __builtin_ia32_selectq_512(__M,
-                                              (__v8di) _mm512_set1_epi64(__A),
-                                              (__v8di) __O);
-}
-
-static  __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set_epi8 (char __e63, char __e62, char __e61, char __e60, char __e59,
-    char __e58, char __e57, char __e56, char __e55, char __e54, char __e53,
-    char __e52, char __e51, char __e50, char __e49, char __e48, char __e47,
-    char __e46, char __e45, char __e44, char __e43, char __e42, char __e41,
-    char __e40, char __e39, char __e38, char __e37, char __e36, char __e35,
-    char __e34, char __e33, char __e32, char __e31, char __e30, char __e29,
-    char __e28, char __e27, char __e26, char __e25, char __e24, char __e23,
-    char __e22, char __e21, char __e20, char __e19, char __e18, char __e17,
-    char __e16, char __e15, char __e14, char __e13, char __e12, char __e11,
-    char __e10, char __e9, char __e8, char __e7, char __e6, char __e5,
-    char __e4, char __e3, char __e2, char __e1, char __e0) {
-
-  return __extension__ (__m512i)(__v64qi)
-    {__e0, __e1, __e2, __e3, __e4, __e5, __e6, __e7,
-     __e8, __e9, __e10, __e11, __e12, __e13, __e14, __e15,
-     __e16, __e17, __e18, __e19, __e20, __e21, __e22, __e23,
-     __e24, __e25, __e26, __e27, __e28, __e29, __e30, __e31,
-     __e32, __e33, __e34, __e35, __e36, __e37, __e38, __e39,
-     __e40, __e41, __e42, __e43, __e44, __e45, __e46, __e47,
-     __e48, __e49, __e50, __e51, __e52, __e53, __e54, __e55,
-     __e56, __e57, __e58, __e59, __e60, __e61, __e62, __e63};
-}
-
-static  __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set_epi16(short __e31, short __e30, short __e29, short __e28,
-    short __e27, short __e26, short __e25, short __e24, short __e23,
-    short __e22, short __e21, short __e20, short __e19, short __e18,
-    short __e17, short __e16, short __e15, short __e14, short __e13,
-    short __e12, short __e11, short __e10, short __e9, short __e8,
-    short __e7, short __e6, short __e5, short __e4, short __e3,
-    short __e2, short __e1, short __e0) {
-  return __extension__ (__m512i)(__v32hi)
-    {__e0, __e1, __e2, __e3, __e4, __e5, __e6, __e7,
-     __e8, __e9, __e10, __e11, __e12, __e13, __e14, __e15,
-     __e16, __e17, __e18, __e19, __e20, __e21, __e22, __e23,
-     __e24, __e25, __e26, __e27, __e28, __e29, __e30, __e31 };
-}
-
-static __inline __m512i __DEFAULT_FN_ATTRS512
-_mm512_set_epi32 (int __A, int __B, int __C, int __D,
-     int __E, int __F, int __G, int __H,
-     int __I, int __J, int __K, int __L,
-     int __M, int __N, int __O, int __P)
-{
-  return __extension__ (__m512i)(__v16si)
-  { __P, __O, __N, __M, __L, __K, __J, __I,
-    __H, __G, __F, __E, __D, __C, __B, __A };
-}
-
-#define _mm512_setr_epi32(e0,e1,e2,e3,e4,e5,e6,e7,           \
-       e8,e9,e10,e11,e12,e13,e14,e15)          \
-  _mm512_set_epi32((e15),(e14),(e13),(e12),(e11),(e10),(e9),(e8),(e7),(e6), \
-                   (e5),(e4),(e3),(e2),(e1),(e0))
-
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_set_epi64 (long long __A, long long __B, long long __C,
-     long long __D, long long __E, long long __F,
-     long long __G, long long __H)
-{
-  return __extension__ (__m512i) (__v8di)
-  { __H, __G, __F, __E, __D, __C, __B, __A };
-}
-
-#define _mm512_setr_epi64(e0,e1,e2,e3,e4,e5,e6,e7)           \
-  _mm512_set_epi64((e7),(e6),(e5),(e4),(e3),(e2),(e1),(e0))
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_set_pd (double __A, double __B, double __C, double __D,
-        double __E, double __F, double __G, double __H)
-{
-  return __extension__ (__m512d)
-  { __H, __G, __F, __E, __D, __C, __B, __A };
-}
-
-#define _mm512_setr_pd(e0,e1,e2,e3,e4,e5,e6,e7)              \
-  _mm512_set_pd((e7),(e6),(e5),(e4),(e3),(e2),(e1),(e0))
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_set_ps (float __A, float __B, float __C, float __D,
-        float __E, float __F, float __G, float __H,
-        float __I, float __J, float __K, float __L,
-        float __M, float __N, float __O, float __P)
-{
-  return __extension__ (__m512)
-  { __P, __O, __N, __M, __L, __K, __J, __I,
-    __H, __G, __F, __E, __D, __C, __B, __A };
-}
-
-#define _mm512_setr_ps(e0,e1,e2,e3,e4,e5,e6,e7,e8,e9,e10,e11,e12,e13,e14,e15) \
-  _mm512_set_ps((e15),(e14),(e13),(e12),(e11),(e10),(e9),(e8),(e7),(e6),(e5), \
-                (e4),(e3),(e2),(e1),(e0))
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_abs_ps(__m512 __A)
-{
-  return (__m512)_mm512_and_epi32(_mm512_set1_epi32(0x7FFFFFFF),(__m512i)__A) ;
-}
-
-static __inline__ __m512 __DEFAULT_FN_ATTRS512
-_mm512_mask_abs_ps(__m512 __W, __mmask16 __K, __m512 __A)
-{
-  return (__m512)_mm512_mask_and_epi32((__m512i)__W, __K, _mm512_set1_epi32(0x7FFFFFFF),(__m512i)__A) ;
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_abs_pd(__m512d __A)
-{
-  return (__m512d)_mm512_and_epi64(_mm512_set1_epi64(0x7FFFFFFFFFFFFFFF),(__v8di)__A) ;
-}
-
-static __inline__ __m512d __DEFAULT_FN_ATTRS512
-_mm512_mask_abs_pd(__m512d __W, __mmask8 __K, __m512d __A)
-{
-  return (__m512d)_mm512_mask_and_epi64((__v8di)__W, __K, _mm512_set1_epi64(0x7FFFFFFFFFFFFFFF),(__v8di)__A);
-}
-
-/* Vector-reduction arithmetic accepts vectors as inputs and produces scalars
- * as outputs. This class of vector operation forms the basis of many
- * scientific computations. In vector-reduction arithmetic, the evaluation of
- * the operation is independent of the order of the input elements of V.
- *
- * We use a bisection method: at each step, the vector from the previous step
- * is partitioned in half, and the operation is applied to the two halves.
- * This takes log2(n) steps, where n is the number of elements in the vector.
- */
-
-#define _mm512_mask_reduce_operator(op) \
-  __v4du __t1 = (__v4du)_mm512_extracti64x4_epi64(__W, 0); \
-  __v4du __t2 = (__v4du)_mm512_extracti64x4_epi64(__W, 1); \
-  __m256i __t3 = (__m256i)(__t1 op __t2); \
-  __v2du __t4 = (__v2du)_mm256_extracti128_si256(__t3, 0); \
-  __v2du __t5 = (__v2du)_mm256_extracti128_si256(__t3, 1); \
-  __v2du __t6 = __t4 op __t5; \
-  __v2du __t7 = __builtin_shufflevector(__t6, __t6, 1, 0); \
-  __v2du __t8 = __t6 op __t7; \
-  return __t8[0]
-
-static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_add_epi64(__m512i __W) {
-  _mm512_mask_reduce_operator(+);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_mul_epi64(__m512i __W) {
-  _mm512_mask_reduce_operator(*);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_and_epi64(__m512i __W) {
-  _mm512_mask_reduce_operator(&);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_or_epi64(__m512i __W) {
-  _mm512_mask_reduce_operator(|);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_add_epi64(__mmask8 __M, __m512i __W) {
-  __W = _mm512_maskz_mov_epi64(__M, __W);
-  _mm512_mask_reduce_operator(+);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_mul_epi64(__mmask8 __M, __m512i __W) {
-  __W = _mm512_mask_mov_epi64(_mm512_set1_epi64(1), __M, __W);
-  _mm512_mask_reduce_operator(*);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_and_epi64(__mmask8 __M, __m512i __W) {
-  __W = _mm512_mask_mov_epi64(_mm512_set1_epi64(~0ULL), __M, __W);
-  _mm512_mask_reduce_operator(&);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_or_epi64(__mmask8 __M, __m512i __W) {
-  __W = _mm512_maskz_mov_epi64(__M, __W);
-  _mm512_mask_reduce_operator(|);
-}
-#undef _mm512_mask_reduce_operator
-
-#define _mm512_mask_reduce_operator(op) \
-  __m256d __t1 = _mm512_extractf64x4_pd(__W, 0); \
-  __m256d __t2 = _mm512_extractf64x4_pd(__W, 1); \
-  __m256d __t3 = __t1 op __t2; \
-  __m128d __t4 = _mm256_extractf128_pd(__t3, 0); \
-  __m128d __t5 = _mm256_extractf128_pd(__t3, 1); \
-  __m128d __t6 = __t4 op __t5; \
-  __m128d __t7 = __builtin_shufflevector(__t6, __t6, 1, 0); \
-  __m128d __t8 = __t6 op __t7; \
-  return __t8[0]
-
-static __inline__ double __DEFAULT_FN_ATTRS512 _mm512_reduce_add_pd(__m512d __W) {
-  _mm512_mask_reduce_operator(+);
-}
-
-static __inline__ double __DEFAULT_FN_ATTRS512 _mm512_reduce_mul_pd(__m512d __W) {
-  _mm512_mask_reduce_operator(*);
-}
-
-static __inline__ double __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_add_pd(__mmask8 __M, __m512d __W) {
-  __W = _mm512_maskz_mov_pd(__M, __W);
-  _mm512_mask_reduce_operator(+);
-}
-
-static __inline__ double __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_mul_pd(__mmask8 __M, __m512d __W) {
-  __W = _mm512_mask_mov_pd(_mm512_set1_pd(1.0), __M, __W);
-  _mm512_mask_reduce_operator(*);
-}
-#undef _mm512_mask_reduce_operator
-
-#define _mm512_mask_reduce_operator(op) \
-  __v8su __t1 = (__v8su)_mm512_extracti64x4_epi64(__W, 0); \
-  __v8su __t2 = (__v8su)_mm512_extracti64x4_epi64(__W, 1); \
-  __m256i __t3 = (__m256i)(__t1 op __t2); \
-  __v4su __t4 = (__v4su)_mm256_extracti128_si256(__t3, 0); \
-  __v4su __t5 = (__v4su)_mm256_extracti128_si256(__t3, 1); \
-  __v4su __t6 = __t4 op __t5; \
-  __v4su __t7 = __builtin_shufflevector(__t6, __t6, 2, 3, 0, 1); \
-  __v4su __t8 = __t6 op __t7; \
-  __v4su __t9 = __builtin_shufflevector(__t8, __t8, 1, 0, 3, 2); \
-  __v4su __t10 = __t8 op __t9; \
-  return __t10[0]
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_reduce_add_epi32(__m512i __W) {
-  _mm512_mask_reduce_operator(+);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_reduce_mul_epi32(__m512i __W) {
-  _mm512_mask_reduce_operator(*);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_reduce_and_epi32(__m512i __W) {
-  _mm512_mask_reduce_operator(&);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_reduce_or_epi32(__m512i __W) {
-  _mm512_mask_reduce_operator(|);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_add_epi32( __mmask16 __M, __m512i __W) {
-  __W = _mm512_maskz_mov_epi32(__M, __W);
-  _mm512_mask_reduce_operator(+);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_mul_epi32( __mmask16 __M, __m512i __W) {
-  __W = _mm512_mask_mov_epi32(_mm512_set1_epi32(1), __M, __W);
-  _mm512_mask_reduce_operator(*);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_and_epi32( __mmask16 __M, __m512i __W) {
-  __W = _mm512_mask_mov_epi32(_mm512_set1_epi32(~0U), __M, __W);
-  _mm512_mask_reduce_operator(&);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_or_epi32(__mmask16 __M, __m512i __W) {
-  __W = _mm512_maskz_mov_epi32(__M, __W);
-  _mm512_mask_reduce_operator(|);
-}
-#undef _mm512_mask_reduce_operator
-
-#define _mm512_mask_reduce_operator(op) \
-  __m256 __t1 = (__m256)_mm512_extractf64x4_pd((__m512d)__W, 0); \
-  __m256 __t2 = (__m256)_mm512_extractf64x4_pd((__m512d)__W, 1); \
-  __m256 __t3 = __t1 op __t2; \
-  __m128 __t4 = _mm256_extractf128_ps(__t3, 0); \
-  __m128 __t5 = _mm256_extractf128_ps(__t3, 1); \
-  __m128 __t6 = __t4 op __t5; \
-  __m128 __t7 = __builtin_shufflevector(__t6, __t6, 2, 3, 0, 1); \
-  __m128 __t8 = __t6 op __t7; \
-  __m128 __t9 = __builtin_shufflevector(__t8, __t8, 1, 0, 3, 2); \
-  __m128 __t10 = __t8 op __t9; \
-  return __t10[0]
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_reduce_add_ps(__m512 __W) {
-  _mm512_mask_reduce_operator(+);
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_reduce_mul_ps(__m512 __W) {
-  _mm512_mask_reduce_operator(*);
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_add_ps(__mmask16 __M, __m512 __W) {
-  __W = _mm512_maskz_mov_ps(__M, __W);
-  _mm512_mask_reduce_operator(+);
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_mul_ps(__mmask16 __M, __m512 __W) {
-  __W = _mm512_mask_mov_ps(_mm512_set1_ps(1.0f), __M, __W);
-  _mm512_mask_reduce_operator(*);
-}
-#undef _mm512_mask_reduce_operator
-
-#define _mm512_mask_reduce_operator(op) \
-  __m512i __t1 = (__m512i)__builtin_shufflevector((__v8di)__V, (__v8di)__V, 4, 5, 6, 7, 0, 1, 2, 3); \
-  __m512i __t2 = _mm512_##op(__V, __t1); \
-  __m512i __t3 = (__m512i)__builtin_shufflevector((__v8di)__t2, (__v8di)__t2, 2, 3, 0, 1, 6, 7, 4, 5); \
-  __m512i __t4 = _mm512_##op(__t2, __t3); \
-  __m512i __t5 = (__m512i)__builtin_shufflevector((__v8di)__t4, (__v8di)__t4, 1, 0, 3, 2, 5, 4, 7, 6); \
-  __v8di __t6 = (__v8di)_mm512_##op(__t4, __t5); \
-  return __t6[0]
-
-static __inline__ long long __DEFAULT_FN_ATTRS512
-_mm512_reduce_max_epi64(__m512i __V) {
-  _mm512_mask_reduce_operator(max_epi64);
-}
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
-_mm512_reduce_max_epu64(__m512i __V) {
-  _mm512_mask_reduce_operator(max_epu64);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512
-_mm512_reduce_min_epi64(__m512i __V) {
-  _mm512_mask_reduce_operator(min_epi64);
-}
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
-_mm512_reduce_min_epu64(__m512i __V) {
-  _mm512_mask_reduce_operator(min_epu64);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_max_epi64(__mmask8 __M, __m512i __V) {
-  __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(-__LONG_LONG_MAX__ - 1LL), __M, __V);
-  _mm512_mask_reduce_operator(max_epi64);
-}
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_max_epu64(__mmask8 __M, __m512i __V) {
-  __V = _mm512_maskz_mov_epi64(__M, __V);
-  _mm512_mask_reduce_operator(max_epu64);
-}
-
-static __inline__ long long __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_min_epi64(__mmask8 __M, __m512i __V) {
-  __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(__LONG_LONG_MAX__), __M, __V);
-  _mm512_mask_reduce_operator(min_epi64);
-}
-
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_min_epu64(__mmask8 __M, __m512i __V) {
-  __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(~0ULL), __M, __V);
-  _mm512_mask_reduce_operator(min_epu64);
-}
-#undef _mm512_mask_reduce_operator
-
-#define _mm512_mask_reduce_operator(op) \
-  __m256i __t1 = _mm512_extracti64x4_epi64(__V, 0); \
-  __m256i __t2 = _mm512_extracti64x4_epi64(__V, 1); \
-  __m256i __t3 = _mm256_##op(__t1, __t2); \
-  __m128i __t4 = _mm256_extracti128_si256(__t3, 0); \
-  __m128i __t5 = _mm256_extracti128_si256(__t3, 1); \
-  __m128i __t6 = _mm_##op(__t4, __t5); \
-  __m128i __t7 = (__m128i)__builtin_shufflevector((__v4si)__t6, (__v4si)__t6, 2, 3, 0, 1); \
-  __m128i __t8 = _mm_##op(__t6, __t7); \
-  __m128i __t9 = (__m128i)__builtin_shufflevector((__v4si)__t8, (__v4si)__t8, 1, 0, 3, 2); \
-  __v4si __t10 = (__v4si)_mm_##op(__t8, __t9); \
-  return __t10[0]
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_reduce_max_epi32(__m512i __V) {
-  _mm512_mask_reduce_operator(max_epi32);
-}
-
-static __inline__ unsigned int __DEFAULT_FN_ATTRS512
-_mm512_reduce_max_epu32(__m512i __V) {
-  _mm512_mask_reduce_operator(max_epu32);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_reduce_min_epi32(__m512i __V) {
-  _mm512_mask_reduce_operator(min_epi32);
-}
-
-static __inline__ unsigned int __DEFAULT_FN_ATTRS512
-_mm512_reduce_min_epu32(__m512i __V) {
-  _mm512_mask_reduce_operator(min_epu32);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_max_epi32(__mmask16 __M, __m512i __V) {
-  __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(-__INT_MAX__ - 1), __M, __V);
-  _mm512_mask_reduce_operator(max_epi32);
-}
-
-static __inline__ unsigned int __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_max_epu32(__mmask16 __M, __m512i __V) {
-  __V = _mm512_maskz_mov_epi32(__M, __V);
-  _mm512_mask_reduce_operator(max_epu32);
-}
-
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_min_epi32(__mmask16 __M, __m512i __V) {
-  __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(__INT_MAX__), __M, __V);
-  _mm512_mask_reduce_operator(min_epi32);
-}
-
-static __inline__ unsigned int __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_min_epu32(__mmask16 __M, __m512i __V) {
-  __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(~0U), __M, __V);
-  _mm512_mask_reduce_operator(min_epu32);
-}
-#undef _mm512_mask_reduce_operator
-
-#define _mm512_mask_reduce_operator(op) \
-  __m256d __t1 = _mm512_extractf64x4_pd(__V, 0); \
-  __m256d __t2 = _mm512_extractf64x4_pd(__V, 1); \
-  __m256d __t3 = _mm256_##op(__t1, __t2); \
-  __m128d __t4 = _mm256_extractf128_pd(__t3, 0); \
-  __m128d __t5 = _mm256_extractf128_pd(__t3, 1); \
-  __m128d __t6 = _mm_##op(__t4, __t5); \
-  __m128d __t7 = __builtin_shufflevector(__t6, __t6, 1, 0); \
-  __m128d __t8 = _mm_##op(__t6, __t7); \
-  return __t8[0]
-
-static __inline__ double __DEFAULT_FN_ATTRS512
-_mm512_reduce_max_pd(__m512d __V) {
-  _mm512_mask_reduce_operator(max_pd);
-}
-
-static __inline__ double __DEFAULT_FN_ATTRS512
-_mm512_reduce_min_pd(__m512d __V) {
-  _mm512_mask_reduce_operator(min_pd);
-}
-
-static __inline__ double __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_max_pd(__mmask8 __M, __m512d __V) {
-  __V = _mm512_mask_mov_pd(_mm512_set1_pd(-__builtin_inf()), __M, __V);
-  _mm512_mask_reduce_operator(max_pd);
-}
-
-static __inline__ double __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_min_pd(__mmask8 __M, __m512d __V) {
-  __V = _mm512_mask_mov_pd(_mm512_set1_pd(__builtin_inf()), __M, __V);
-  _mm512_mask_reduce_operator(min_pd);
-}
-#undef _mm512_mask_reduce_operator
-
-#define _mm512_mask_reduce_operator(op) \
-  __m256 __t1 = (__m256)_mm512_extractf64x4_pd((__m512d)__V, 0); \
-  __m256 __t2 = (__m256)_mm512_extractf64x4_pd((__m512d)__V, 1); \
-  __m256 __t3 = _mm256_##op(__t1, __t2); \
-  __m128 __t4 = _mm256_extractf128_ps(__t3, 0); \
-  __m128 __t5 = _mm256_extractf128_ps(__t3, 1); \
-  __m128 __t6 = _mm_##op(__t4, __t5); \
-  __m128 __t7 = __builtin_shufflevector(__t6, __t6, 2, 3, 0, 1); \
-  __m128 __t8 = _mm_##op(__t6, __t7); \
-  __m128 __t9 = __builtin_shufflevector(__t8, __t8, 1, 0, 3, 2); \
-  __m128 __t10 = _mm_##op(__t8, __t9); \
-  return __t10[0]
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_reduce_max_ps(__m512 __V) {
-  _mm512_mask_reduce_operator(max_ps);
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_reduce_min_ps(__m512 __V) {
-  _mm512_mask_reduce_operator(min_ps);
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_max_ps(__mmask16 __M, __m512 __V) {
-  __V = _mm512_mask_mov_ps(_mm512_set1_ps(-__builtin_inff()), __M, __V);
-  _mm512_mask_reduce_operator(max_ps);
-}
-
-static __inline__ float __DEFAULT_FN_ATTRS512
-_mm512_mask_reduce_min_ps(__mmask16 __M, __m512 __V) {
-  __V = _mm512_mask_mov_ps(_mm512_set1_ps(__builtin_inff()), __M, __V);
-  _mm512_mask_reduce_operator(min_ps);
-}
-#undef _mm512_mask_reduce_operator
-
-/// Moves the least significant 32 bits of a vector of [16 x i32] to a
-///    32-bit signed integer value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVD / MOVD </c> instruction.
-///
-/// \param __A
-///    A vector of [16 x i32]. The least significant 32 bits are moved to the
-///    destination.
-/// \returns A 32-bit signed integer containing the moved value.
-static __inline__ int __DEFAULT_FN_ATTRS512
-_mm512_cvtsi512_si32(__m512i __A) {
-  __v16si __b = (__v16si)__A;
-  return __b[0];
-}
-
-#undef __DEFAULT_FN_ATTRS512
-#undef __DEFAULT_FN_ATTRS128
-#undef __DEFAULT_FN_ATTRS
-
-#endif /* __AVX512FINTRIN_H */
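For context, a minimal usage sketch of two of the reduction helpers declared in the header above; the compile line and target are assumptions (an AVX-512F-capable CPU and something like `cc -O2 -mavx512f`), not part of this change:

/* Sum all lanes of a [16 x i32] vector, then sum only the even lanes with a mask. */
#include <immintrin.h>
#include <stdio.h>

int main(void) {
  /* Elements 0..15; _mm512_set_epi32 takes the highest element first. */
  __m512i v = _mm512_set_epi32(15, 14, 13, 12, 11, 10, 9, 8,
                               7, 6, 5, 4, 3, 2, 1, 0);

  int total = _mm512_reduce_add_epi32(v);                  /* 0 + 1 + ... + 15 = 120 */

  /* Bit i of the mask selects element i; 0x5555 keeps the even lanes only. */
  __mmask16 even = 0x5555;
  int even_total = _mm512_mask_reduce_add_epi32(even, v);  /* 0 + 2 + ... + 14 = 56 */

  printf("total=%d even_total=%d\n", total, even_total);
  return 0;
}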
diff --git a/linux-x86/lib64/clang/10.0.1/include/bmiintrin.h b/linux-x86/lib64/clang/10.0.1/include/bmiintrin.h
deleted file mode 100644
index b7af62f..0000000
--- a/linux-x86/lib64/clang/10.0.1/include/bmiintrin.h
+++ /dev/null
@@ -1,372 +0,0 @@
-/*===---- bmiintrin.h - BMI intrinsics -------------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
-#error "Never use <bmiintrin.h> directly; include <x86intrin.h> instead."
-#endif
-
-#ifndef __BMIINTRIN_H
-#define __BMIINTRIN_H
-
-#define _tzcnt_u16(a)     (__tzcnt_u16((a)))
-
-#define _andn_u32(a, b)   (__andn_u32((a), (b)))
-
-/* _bextr_u32 != __bextr_u32 */
-#define _blsi_u32(a)      (__blsi_u32((a)))
-
-#define _blsmsk_u32(a)    (__blsmsk_u32((a)))
-
-#define _blsr_u32(a)      (__blsr_u32((a)))
-
-#define _tzcnt_u32(a)     (__tzcnt_u32((a)))
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi")))
-
-/* Allow using the tzcnt intrinsics even for non-BMI targets. Since the TZCNT
-   instruction behaves as BSF on non-BMI targets, there is code that expects
-   to use it as a potentially faster version of BSF. */
-#define __RELAXED_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
-
-/// Counts the number of trailing zero bits in the operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
-///
-/// \param __X
-///    An unsigned 16-bit integer whose trailing zeros are to be counted.
-/// \returns An unsigned 16-bit integer containing the number of trailing zero
-///    bits in the operand.
-static __inline__ unsigned short __RELAXED_FN_ATTRS
-__tzcnt_u16(unsigned short __X)
-{
-  return __builtin_ia32_tzcnt_u16(__X);
-}
-
-/// Performs a bitwise AND of the second operand with the one's
-///    complement of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> ANDN </c> instruction.
-///
-/// \param __X
-///    An unsigned integer containing one of the operands.
-/// \param __Y
-///    An unsigned integer containing one of the operands.
-/// \returns An unsigned integer containing the bitwise AND of the second
-///    operand with the one's complement of the first operand.
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-__andn_u32(unsigned int __X, unsigned int __Y)
-{
-  return ~__X & __Y;
-}
-
-/* AMD-specified, double-leading-underscore version of BEXTR */
-/// Extracts the specified bits from the first operand and returns them
-///    in the least significant bits of the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> BEXTR </c> instruction.
-///
-/// \param __X
-///    An unsigned integer whose bits are to be extracted.
-/// \param __Y
-///    An unsigned integer used to specify which bits are extracted. Bits [7:0]
-///    specify the index of the least significant bit. Bits [15:8] specify the
-///    number of bits to be extracted.
-/// \returns An unsigned integer whose least significant bits contain the
-///    extracted bits.
-/// \see _bextr_u32
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-__bextr_u32(unsigned int __X, unsigned int __Y)
-{
-  return __builtin_ia32_bextr_u32(__X, __Y);
-}
-
-/* Intel-specified, single-leading-underscore version of BEXTR */
-/// Extracts the specified bits from the first operand and returns them
-///    in the least significant bits of the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> BEXTR </c> instruction.
-///
-/// \param __X
-///    An unsigned integer whose bits are to be extracted.
-/// \param __Y
-///    An unsigned integer used to specify the index of the least significant
-///    bit for the bits to be extracted. Bits [7:0] specify the index.
-/// \param __Z
-///    An unsigned integer used to specify the number of bits to be extracted.
-///    Bits [7:0] specify the number of bits.
-/// \returns An unsigned integer whose least significant bits contain the
-///    extracted bits.
-/// \see __bextr_u32
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-_bextr_u32(unsigned int __X, unsigned int __Y, unsigned int __Z)
-{
-  return __builtin_ia32_bextr_u32 (__X, ((__Y & 0xff) | ((__Z & 0xff) << 8)));
-}
-
-/// Clears all bits in the source except for the least significant bit
-///    containing a value of 1 and returns the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> BLSI </c> instruction.
-///
-/// \param __X
-///    An unsigned integer whose bits are to be cleared.
-/// \returns An unsigned integer containing the result of clearing the bits from
-///    the source operand.
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-__blsi_u32(unsigned int __X)
-{
-  return __X & -__X;
-}
-
-/// Creates a mask whose bits are set to 1, using bit 0 up to and
-///    including the least significant bit that is set to 1 in the source
-///    operand and returns the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> BLSMSK </c> instruction.
-///
-/// \param __X
-///    An unsigned integer used to create the mask.
-/// \returns An unsigned integer containing the newly created mask.
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-__blsmsk_u32(unsigned int __X)
-{
-  return __X ^ (__X - 1);
-}
-
-/// Clears the least significant bit that is set to 1 in the source
-///    operand and returns the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> BLSR </c> instruction.
-///
-/// \param __X
-///    An unsigned integer containing the operand to be cleared.
-/// \returns An unsigned integer containing the result of clearing the source
-///    operand.
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-__blsr_u32(unsigned int __X)
-{
-  return __X & (__X - 1);
-}
-
-/// Counts the number of trailing zero bits in the operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
-///
-/// \param __X
-///    An unsigned 32-bit integer whose trailing zeros are to be counted.
-/// \returns An unsigned 32-bit integer containing the number of trailing zero
-///    bits in the operand.
-static __inline__ unsigned int __RELAXED_FN_ATTRS
-__tzcnt_u32(unsigned int __X)
-{
-  return __builtin_ia32_tzcnt_u32(__X);
-}
-
-/// Counts the number of trailing zero bits in the operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
-///
-/// \param __X
-///    An unsigned 32-bit integer whose trailing zeros are to be counted.
-/// \returns A 32-bit integer containing the number of trailing zero bits in
-///    the operand.
-static __inline__ int __RELAXED_FN_ATTRS
-_mm_tzcnt_32(unsigned int __X)
-{
-  return __builtin_ia32_tzcnt_u32(__X);
-}
-
-#ifdef __x86_64__
-
-#define _andn_u64(a, b)   (__andn_u64((a), (b)))
-
-/* _bextr_u64 != __bextr_u64 */
-#define _blsi_u64(a)      (__blsi_u64((a)))
-
-#define _blsmsk_u64(a)    (__blsmsk_u64((a)))
-
-#define _blsr_u64(a)      (__blsr_u64((a)))
-
-#define _tzcnt_u64(a)     (__tzcnt_u64((a)))
-
-/// Performs a bitwise AND of the second operand with the one's
-///    complement of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> ANDN </c> instruction.
-///
-/// \param __X
-///    An unsigned 64-bit integer containing one of the operands.
-/// \param __Y
-///    An unsigned 64-bit integer containing one of the operands.
-/// \returns An unsigned 64-bit integer containing the bitwise AND of the second
-///    operand with the one's complement of the first operand.
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-__andn_u64 (unsigned long long __X, unsigned long long __Y)
-{
-  return ~__X & __Y;
-}
-
-/* AMD-specified, double-leading-underscore version of BEXTR */
-/// Extracts the specified bits from the first operand and returns them
-///    in the least significant bits of the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> BEXTR </c> instruction.
-///
-/// \param __X
-///    An unsigned 64-bit integer whose bits are to be extracted.
-/// \param __Y
-///    An unsigned 64-bit integer used to specify which bits are extracted. Bits
-///    [7:0] specify the index of the least significant bit. Bits [15:8] specify
-///    the number of bits to be extracted.
-/// \returns An unsigned 64-bit integer whose least significant bits contain the
-///    extracted bits.
-/// \see _bextr_u64
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-__bextr_u64(unsigned long long __X, unsigned long long __Y)
-{
-  return __builtin_ia32_bextr_u64(__X, __Y);
-}
-
-/* Intel-specified, single-leading-underscore version of BEXTR */
-/// Extracts the specified bits from the first operand and returns them
-///     in the least significant bits of the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> BEXTR </c> instruction.
-///
-/// \param __X
-///    An unsigned 64-bit integer whose bits are to be extracted.
-/// \param __Y
-///    An unsigned integer used to specify the index of the least significant
-///    bit for the bits to be extracted. Bits [7:0] specify the index.
-/// \param __Z
-///    An unsigned integer used to specify the number of bits to be extracted.
-///    Bits [7:0] specify the number of bits.
-/// \returns An unsigned 64-bit integer whose least significant bits contain the
-///    extracted bits.
-/// \see __bextr_u64
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-_bextr_u64(unsigned long long __X, unsigned int __Y, unsigned int __Z)
-{
-  return __builtin_ia32_bextr_u64 (__X, ((__Y & 0xff) | ((__Z & 0xff) << 8)));
-}
-
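The two BEXTR spellings above encode the same start/length information differently: the Intel-style _bextr_u64 takes them as separate arguments, while __bextr_u64 packs them into bits [7:0] and [15:8] of its second operand. A hedged sketch with illustrative values:

#include <x86intrin.h>

static void bmi_bextr_sketch(void) {
  unsigned long long v = 0xABCDULL;
  unsigned long long a = _bextr_u64(v, 4, 8);        /* bits [11:4] of v -> 0xBC */
  unsigned long long b = __bextr_u64(v, 0x0804ULL);  /* same extraction: start 4 in bits [7:0], length 8 in bits [15:8] */
  (void)a; (void)b;
}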
-/// Clears all bits in the source except for the least significant bit
-///    containing a value of 1 and returns the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> BLSI </c> instruction.
-///
-/// \param __X
-///    An unsigned 64-bit integer whose bits are to be cleared.
-/// \returns An unsigned 64-bit integer containing the result of clearing the
-///    bits from the source operand.
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-__blsi_u64(unsigned long long __X)
-{
-  return __X & -__X;
-}
-
-/// Creates a mask whose bits are set to 1, using bit 0 up to and
-///    including the least significant bit that is set to 1 in the source
-///    operand and returns the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> BLSMSK </c> instruction.
-///
-/// \param __X
-///    An unsigned 64-bit integer used to create the mask.
-/// \returns An unsigned 64-bit integer containing the newly created mask.
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-__blsmsk_u64(unsigned long long __X)
-{
-  return __X ^ (__X - 1);
-}
-
-/// Clears the least significant bit that is set to 1 in the source
-///    operand and returns the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> BLSR </c> instruction.
-///
-/// \param __X
-///    An unsigned 64-bit integer containing the operand to be cleared.
-/// \returns An unsigned 64-bit integer containing the result of clearing the
-///    source operand.
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-__blsr_u64(unsigned long long __X)
-{
-  return __X & (__X - 1);
-}
-
-/// Counts the number of trailing zero bits in the operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
-///
-/// \param __X
-///    An unsigned 64-bit integer whose trailing zeros are to be counted.
-/// \returns An unsigned 64-bit integer containing the number of trailing zero
-///    bits in the operand.
-static __inline__ unsigned long long __RELAXED_FN_ATTRS
-__tzcnt_u64(unsigned long long __X)
-{
-  return __builtin_ia32_tzcnt_u64(__X);
-}
-
-/// Counts the number of trailing zero bits in the operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
-///
-/// \param __X
-///    An unsigned 64-bit integer whose trailing zeros are to be counted.
-/// \returns A 64-bit integer containing the number of trailing zero bits in
-///    the operand.
-static __inline__ long long __RELAXED_FN_ATTRS
-_mm_tzcnt_64(unsigned long long __X)
-{
-  return __builtin_ia32_tzcnt_u64(__X);
-}
-
-#endif /* __x86_64__ */
-
-#undef __DEFAULT_FN_ATTRS
-#undef __RELAXED_FN_ATTRS
-
-#endif /* __BMIINTRIN_H */
diff --git a/linux-x86/lib64/clang/10.0.1/include/emmintrin.h b/linux-x86/lib64/clang/10.0.1/include/emmintrin.h
deleted file mode 100644
index c8fefdf..0000000
--- a/linux-x86/lib64/clang/10.0.1/include/emmintrin.h
+++ /dev/null
@@ -1,4981 +0,0 @@
-/*===---- emmintrin.h - SSE2 intrinsics ------------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __EMMINTRIN_H
-#define __EMMINTRIN_H
-
-#include <xmmintrin.h>
-
-typedef double __m128d __attribute__((__vector_size__(16), __aligned__(16)));
-typedef long long __m128i __attribute__((__vector_size__(16), __aligned__(16)));
-
-typedef double __m128d_u __attribute__((__vector_size__(16), __aligned__(1)));
-typedef long long __m128i_u __attribute__((__vector_size__(16), __aligned__(1)));
-
-/* Type defines.  */
-typedef double __v2df __attribute__ ((__vector_size__ (16)));
-typedef long long __v2di __attribute__ ((__vector_size__ (16)));
-typedef short __v8hi __attribute__((__vector_size__(16)));
-typedef char __v16qi __attribute__((__vector_size__(16)));
-
-/* Unsigned types */
-typedef unsigned long long __v2du __attribute__ ((__vector_size__ (16)));
-typedef unsigned short __v8hu __attribute__((__vector_size__(16)));
-typedef unsigned char __v16qu __attribute__((__vector_size__(16)));
-
-/* We need an explicitly signed variant for char. Note that this shouldn't
- * appear in the interface though. */
-typedef signed char __v16qs __attribute__((__vector_size__(16)));
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse2"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS_MMX __attribute__((__always_inline__, __nodebug__, __target__("mmx,sse2"), __min_vector_width__(64)))
-
-/// Adds lower double-precision values in both operands and returns the
-///    sum in the lower 64 bits of the result. The upper 64 bits of the result
-///    are copied from the upper double-precision value of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VADDSD / ADDSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
-///    sum of the lower 64 bits of both operands. The upper 64 bits are copied
-///    from the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_add_sd(__m128d __a, __m128d __b)
-{
-  __a[0] += __b[0];
-  return __a;
-}
-
-/// Adds two 128-bit vectors of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VADDPD / ADDPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \returns A 128-bit vector of [2 x double] containing the sums of both
-///    operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_add_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)((__v2df)__a + (__v2df)__b);
-}
-
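A short sketch contrasting the scalar and packed additions above; note that _mm_set_pd takes the high element first, and the values here are illustrative:

#include <x86intrin.h>

static void sse2_add_sketch(void) {
  __m128d a = _mm_set_pd(10.0, 1.0);  /* a = {1.0, 10.0} as {element 0, element 1} */
  __m128d b = _mm_set_pd(20.0, 2.0);  /* b = {2.0, 20.0} */
  __m128d s = _mm_add_sd(a, b);       /* {3.0, 10.0}: only element 0 is added; element 1 comes from a */
  __m128d p = _mm_add_pd(a, b);       /* {3.0, 30.0}: both elements are added */
  (void)s; (void)p;
}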
-/// Subtracts the lower double-precision value of the second operand
-///    from the lower double-precision value of the first operand and returns
-///    the difference in the lower 64 bits of the result. The upper 64 bits of
-///    the result are copied from the upper double-precision value of the first
-///    operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VSUBSD / SUBSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the minuend.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing the subtrahend.
-/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
-///    difference of the lower 64 bits of both operands. The upper 64 bits are
-///    copied from the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_sub_sd(__m128d __a, __m128d __b)
-{
-  __a[0] -= __b[0];
-  return __a;
-}
-
-/// Subtracts two 128-bit vectors of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VSUBPD / SUBPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the minuend.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing the subtrahend.
-/// \returns A 128-bit vector of [2 x double] containing the differences between
-///    both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_sub_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)((__v2df)__a - (__v2df)__b);
-}
-
-/// Multiplies lower double-precision values in both operands and returns
-///    the product in the lower 64 bits of the result. The upper 64 bits of the
-///    result are copied from the upper double-precision value of the first
-///    operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMULSD / MULSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
-///    product of the lower 64 bits of both operands. The upper 64 bits are
-///    copied from the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_mul_sd(__m128d __a, __m128d __b)
-{
-  __a[0] *= __b[0];
-  return __a;
-}
-
-/// Multiplies two 128-bit vectors of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMULPD / MULPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the operands.
-/// \returns A 128-bit vector of [2 x double] containing the products of both
-///    operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_mul_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)((__v2df)__a * (__v2df)__b);
-}
-
-/// Divides the lower double-precision value of the first operand by the
-///    lower double-precision value of the second operand and returns the
-///    quotient in the lower 64 bits of the result. The upper 64 bits of the
-///    result are copied from the upper double-precision value of the first
-///    operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VDIVSD / DIVSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the dividend.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing the divisor.
-/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
-///    quotient of the lower 64 bits of both operands. The upper 64 bits are
-///    copied from the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_div_sd(__m128d __a, __m128d __b)
-{
-  __a[0] /= __b[0];
-  return __a;
-}
-
-/// Performs an element-by-element division of two 128-bit vectors of
-///    [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VDIVPD / DIVPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the dividend.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing the divisor.
-/// \returns A 128-bit vector of [2 x double] containing the quotients of both
-///    operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_div_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)((__v2df)__a / (__v2df)__b);
-}
-
-/// Calculates the square root of the lower double-precision value of
-///    the second operand and returns it in the lower 64 bits of the result.
-///    The upper 64 bits of the result are copied from the upper
-///    double-precision value of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VSQRTSD / SQRTSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the operands. The
-///    upper 64 bits of this operand are copied to the upper 64 bits of the
-///    result.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the operands. The
-///    square root is calculated using the lower 64 bits of this operand.
-/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
-///    square root of the lower 64 bits of operand \a __b, and whose upper 64
-///    bits are copied from the upper 64 bits of operand \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_sqrt_sd(__m128d __a, __m128d __b)
-{
-  __m128d __c = __builtin_ia32_sqrtsd((__v2df)__b);
-  return __extension__ (__m128d) { __c[0], __a[1] };
-}
-
-/// Calculates the square root of each of the two values stored in a
-///    128-bit vector of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VSQRTPD / SQRTPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector of [2 x double] containing the square roots of the
-///    values in the operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_sqrt_pd(__m128d __a)
-{
-  return __builtin_ia32_sqrtpd((__v2df)__a);
-}
-
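A sketch of the scalar/packed square-root pair above, showing that the scalar form takes its upper element from the first operand (illustrative names and values):

#include <x86intrin.h>

static void sse2_sqrt_sketch(void) {
  __m128d a = _mm_set_pd(7.0, 1.0);    /* a = {1.0, 7.0} */
  __m128d b = _mm_set_pd(100.0, 9.0);  /* b = {9.0, 100.0} */
  __m128d r = _mm_sqrt_sd(a, b);       /* {3.0, 7.0}: sqrt of b's element 0, element 1 copied from a */
  __m128d q = _mm_sqrt_pd(b);          /* {3.0, 10.0}: square roots of both elements */
  (void)r; (void)q;
}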
-/// Compares lower 64-bit double-precision values of both operands, and
-///    returns the lesser of the pair of values in the lower 64-bits of the
-///    result. The upper 64 bits of the result are copied from the upper
-///    double-precision value of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMINSD / MINSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the operands. The
-///    lower 64 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the operands. The
-///    lower 64 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
-///    minimum value between both operands. The upper 64 bits are copied from
-///    the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_min_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_minsd((__v2df)__a, (__v2df)__b);
-}
-
-/// Performs element-by-element comparison of the two 128-bit vectors of
-///    [2 x double] and returns the vector containing the lesser of each pair of
-///    values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMINPD / MINPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the operands.
-/// \returns A 128-bit vector of [2 x double] containing the minimum values
-///    between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_min_pd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_minpd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares lower 64-bit double-precision values of both operands, and
-///    returns the greater of the pair of values in the lower 64-bits of the
-///    result. The upper 64 bits of the result are copied from the upper
-///    double-precision value of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMAXSD / MAXSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the operands. The
-///    lower 64 bits of this operand are used in the comparison.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the operands. The
-///    lower 64 bits of this operand are used in the comparison.
-/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
-///    maximum value between both operands. The upper 64 bits are copied from
-///    the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_max_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_maxsd((__v2df)__a, (__v2df)__b);
-}
-
-/// Performs element-by-element comparison of the two 128-bit vectors of
-///    [2 x double] and returns the vector containing the greater of each pair
-///    of values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMAXPD / MAXPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the operands.
-/// \returns A 128-bit vector of [2 x double] containing the maximum values
-///    between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_max_pd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_maxpd((__v2df)__a, (__v2df)__b);
-}
-
-/// Performs a bitwise AND of two 128-bit vectors of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPAND / PAND </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \returns A 128-bit vector of [2 x double] containing the bitwise AND of the
-///    values between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_and_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)((__v2du)__a & (__v2du)__b);
-}
-
-/// Performs a bitwise AND of two 128-bit vectors of [2 x double], using
-///    the one's complement of the values contained in the first source operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPANDN / PANDN </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the left source operand. The
-///    one's complement of this value is used in the bitwise AND.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing the right source operand.
-/// \returns A 128-bit vector of [2 x double] containing the bitwise AND of the
-///    values in the second operand and the one's complement of the first
-///    operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_andnot_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)(~(__v2du)__a & (__v2du)__b);
-}
-
-/// Performs a bitwise OR of two 128-bit vectors of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPOR / POR </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \returns A 128-bit vector of [2 x double] containing the bitwise OR of the
-///    values between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_or_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)((__v2du)__a | (__v2du)__b);
-}
-
-/// Performs a bitwise XOR of two 128-bit vectors of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPXOR / PXOR </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \param __b
-///    A 128-bit vector of [2 x double] containing one of the source operands.
-/// \returns A 128-bit vector of [2 x double] containing the bitwise XOR of the
-///    values between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_xor_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)((__v2du)__a ^ (__v2du)__b);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] for equality. Each comparison yields 0x0
-///    for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPEQPD / CMPEQPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpeq_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpeqpd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are less than those in the second operand. Each comparison
-///    yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLTPD / CMPLTPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmplt_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpltpd((__v2df)__a, (__v2df)__b);
-}
-
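The packed comparisons above produce all-ones or all-zeros masks rather than 0/1 values; one common use is a branch-free select built from the AND/ANDNOT/OR intrinsics defined earlier. A hedged sketch with illustrative values:

#include <x86intrin.h>

static void sse2_select_sketch(void) {
  __m128d x = _mm_set_pd(4.0, 1.0);  /* x = {1.0, 4.0} */
  __m128d y = _mm_set_pd(3.0, 2.0);  /* y = {2.0, 3.0} */
  __m128d m = _mm_cmplt_pd(x, y);    /* {all-ones, all-zeros}: 1.0 < 2.0 holds, 4.0 < 3.0 does not */
  /* Pick x where the mask is set and y elsewhere: a per-element minimum for non-NaN inputs. */
  __m128d r = _mm_or_pd(_mm_and_pd(m, x), _mm_andnot_pd(m, y));  /* {1.0, 3.0} */
  (void)r;
}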
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are less than or equal to those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLEPD / CMPLEPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmple_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmplepd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are greater than those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLTPD / CMPLTPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpgt_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpltpd((__v2df)__b, (__v2df)__a);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are greater than or equal to those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLEPD / CMPLEPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpge_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmplepd((__v2df)__b, (__v2df)__a);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are ordered with respect to those in the second operand.
-///
-///    A pair of double-precision values are "ordered" with respect to each
-///    other if neither value is a NaN. Each comparison yields 0x0 for false,
-///    0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPORDPD / CMPORDPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpord_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpordpd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are unordered with respect to those in the second operand.
-///
-///    A pair of double-precision values are "unordered" with respect to each
-///    other if one or both values are NaN. Each comparison yields 0x0 for
-///    false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPUNORDPD / CMPUNORDPD </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpunord_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpunordpd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are unequal to those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNEQPD / CMPNEQPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpneq_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpneqpd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are not less than those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLTPD / CMPNLTPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnlt_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are not less than or equal to those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLEPD / CMPNLEPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnle_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are not greater than those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLTPD / CMPNLTPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpngt_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__b, (__v2df)__a);
-}
-
-/// Compares each of the corresponding double-precision values of the
-///    128-bit vectors of [2 x double] to determine if the values in the first
-///    operand are not greater than or equal to those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLEPD / CMPNLEPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \param __b
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnge_pd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__b, (__v2df)__a);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] for equality.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPEQSD / CMPEQSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpeq_sd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpeqsd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is less than the corresponding value in
-///    the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLTSD / CMPLTSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmplt_sd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpltsd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is less than or equal to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLESD / CMPLESD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmple_sd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmplesd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is greater than the corresponding value
-///    in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLTSD / CMPLTSD </c> instruction.
-///
-/// \param __a
-///     A 128-bit vector of [2 x double]. The lower double-precision value is
-///     compared to the lower double-precision value of \a __b.
-/// \param __b
-///     A 128-bit vector of [2 x double]. The lower double-precision value is
-///     compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///     results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpgt_sd(__m128d __a, __m128d __b)
-{
-  __m128d __c = __builtin_ia32_cmpltsd((__v2df)__b, (__v2df)__a);
-  return __extension__ (__m128d) { __c[0], __a[1] };
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is greater than or equal to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPLESD / CMPLESD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpge_sd(__m128d __a, __m128d __b)
-{
-  __m128d __c = __builtin_ia32_cmplesd((__v2df)__b, (__v2df)__a);
-  return __extension__ (__m128d) { __c[0], __a[1] };
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is "ordered" with respect to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. A pair
-///    of double-precision values are "ordered" with respect to each other if
-///    neither value is a NaN.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPORDSD / CMPORDSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpord_sd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpordsd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is "unordered" with respect to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. A pair
-///    of double-precision values are "unordered" with respect to each other if
-///    one or both values are NaN.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPUNORDSD / CMPUNORDSD </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpunord_sd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpunordsd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is unequal to the corresponding value in
-///    the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNEQSD / CMPNEQSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpneq_sd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpneqsd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is not less than the corresponding
-///    value in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLTSD / CMPNLTSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnlt_sd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpnltsd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is not less than or equal to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLESD / CMPNLESD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnle_sd(__m128d __a, __m128d __b)
-{
-  return (__m128d)__builtin_ia32_cmpnlesd((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is not greater than the corresponding
-///    value in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLTSD / CMPNLTSD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpngt_sd(__m128d __a, __m128d __b)
-{
-  __m128d __c = __builtin_ia32_cmpnltsd((__v2df)__b, (__v2df)__a);
-  return __extension__ (__m128d) { __c[0], __a[1] };
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is not greater than or equal to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCMPNLESD / CMPNLESD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns A 128-bit vector. The lower 64 bits contain the comparison
-///    results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnge_sd(__m128d __a, __m128d __b)
-{
-  __m128d __c = __builtin_ia32_cmpnlesd((__v2df)__b, (__v2df)__a);
-  return __extension__ (__m128d) { __c[0], __a[1] };
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] for equality.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISD / COMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comieq_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_comisdeq((__v2df)__a, (__v2df)__b);
-}
-
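Unlike the packed comparisons, the COMISD-based forms above return an ordinary int; a minimal sketch, with illustrative names and values:

#include <x86intrin.h>

static void sse2_comi_sketch(void) {
  __m128d x = _mm_set_sd(2.0);   /* lower element 2.0, upper element 0.0 */
  __m128d y = _mm_set_sd(2.0);
  int eq = _mm_comieq_sd(x, y);  /* 1: the lower elements compare equal */
  /* As documented above, a NaN in either lower element makes this comparison return 0. */
  (void)eq;
}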
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is less than the corresponding value in
-///    the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISD / COMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results. If either of the two
-///     lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comilt_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_comisdlt((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is less than or equal to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISD / COMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///     A 128-bit vector of [2 x double]. The lower double-precision value is
-///     compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results. If either of the two
-///     lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comile_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_comisdle((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is greater than the corresponding value
-///    in the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISD / COMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results. If either of the two
-///     lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comigt_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_comisdgt((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is greater than or equal to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISD / COMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comige_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_comisdge((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is unequal to the corresponding value in
-///    the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two
-///    lower double-precision values is NaN, 1 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCOMISD / COMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results. If either of the two
-///     lower double-precision values is NaN, 1 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comineq_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_comisdneq((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] for equality. The
-///    comparison yields 0 for false, 1 for true.
-///
-///    If either of the two lower double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISD / UCOMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomieq_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_ucomisdeq((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is less than the corresponding value in
-///    the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two lower
-///    double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISD / UCOMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomilt_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_ucomisdlt((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is less than or equal to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two lower
-///    double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISD / UCOMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///     A 128-bit vector of [2 x double]. The lower double-precision value is
-///     compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results. If either of the two
-///     lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomile_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_ucomisdle((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is greater than the corresponding value
-///    in the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two lower
-///    double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISD / UCOMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///     A 128-bit vector of [2 x double]. The lower double-precision value is
-///     compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results. If either of the two
-///     lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomigt_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_ucomisdgt((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is greater than or equal to the
-///    corresponding value in the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true.  If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISD / UCOMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison results. If either of the two
-///    lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomige_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_ucomisdge((__v2df)__a, (__v2df)__b);
-}
-
-/// Compares the lower double-precision floating-point values in each of
-///    the two 128-bit floating-point vectors of [2 x double] to determine if
-///    the value in the first parameter is unequal to the corresponding value in
-///    the second parameter.
-///
-///    The comparison yields 0 for false, 1 for true. If either of the two lower
-///    double-precision values is NaN, 1 is returned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUCOMISD / UCOMISD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __b.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision value is
-///    compared to the lower double-precision value of \a __a.
-/// \returns An integer containing the comparison result. If either of the two
-///    lower double-precision values is NaN, 1 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomineq_sd(__m128d __a, __m128d __b)
-{
-  return __builtin_ia32_ucomisdneq((__v2df)__a, (__v2df)__b);
-}
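-
-/* Usage sketch (added illustration, not part of the original header): the
-   documentation above spells out the NaN behavior of these unordered scalar
-   comparisons; a hypothetical snippet exercising it:
-
-     __m128d __nan = _mm_set_sd(__builtin_nan(""));
-     __m128d __one = _mm_set_sd(1.0);
-     int __ge  = _mm_ucomige_sd(__nan, __one);   // documented to return 0 for NaN
-     int __neq = _mm_ucomineq_sd(__nan, __one);  // documented to return 1 for NaN
-*/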
-
-/// Converts the two double-precision floating-point elements of a
-///    128-bit vector of [2 x double] into two single-precision floating-point
-///    values, returned in the lower 64 bits of a 128-bit vector of [4 x float].
-///    The upper 64 bits of the result vector are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTPD2PS / CVTPD2PS </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the
-///    converted values. The upper 64 bits are set to zero.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cvtpd_ps(__m128d __a)
-{
-  return __builtin_ia32_cvtpd2ps((__v2df)__a);
-}
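-
-/* Usage sketch (added illustration, not part of the original header): the
-   packed double-to-float conversion places its two results in the low half
-   of the [4 x float] vector and zeroes the high half.
-
-     __m128d __d = _mm_set_pd(2.0, 1.0);   // element 0 = 1.0, element 1 = 2.0
-     __m128  __f = _mm_cvtpd_ps(__d);      // {1.0f, 2.0f, 0.0f, 0.0f}
-*/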
-
-/// Converts the lower two single-precision floating-point elements of a
-///    128-bit vector of [4 x float] into two double-precision floating-point
-///    values, returned in a 128-bit vector of [2 x double]. The upper two
-///    elements of the input vector are unused.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTPS2PD / CVTPS2PD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The lower two single-precision
-///    floating-point elements are converted to double-precision values. The
-///    upper two elements are unused.
-/// \returns A 128-bit vector of [2 x double] containing the converted values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtps_pd(__m128 __a)
-{
-  return (__m128d) __builtin_convertvector(
-      __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 1), __v2df);
-}
-
-/// Converts the lower two integer elements of a 128-bit vector of
-///    [4 x i32] into two double-precision floating-point values, returned in a
-///    128-bit vector of [2 x double].
-///
-///    The upper two elements of the input vector are unused.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTDQ2PD / CVTDQ2PD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector of [4 x i32]. The lower two integer elements are
-///    converted to double-precision values.
-///
-///    The upper two elements are unused.
-/// \returns A 128-bit vector of [2 x double] containing the converted values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtepi32_pd(__m128i __a)
-{
-  return (__m128d) __builtin_convertvector(
-      __builtin_shufflevector((__v4si)__a, (__v4si)__a, 0, 1), __v2df);
-}
-
-/// Converts the two double-precision floating-point elements of a
-///    128-bit vector of [2 x double] into two signed 32-bit integer values,
-///    returned in the lower 64 bits of a 128-bit vector of [4 x i32]. The upper
-///    64 bits of the result vector are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTPD2DQ / CVTPD2DQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector of [4 x i32] whose lower 64 bits contain the
-///    converted values. The upper 64 bits are set to zero.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtpd_epi32(__m128d __a)
-{
-  return __builtin_ia32_cvtpd2dq((__v2df)__a);
-}
-
-/// Converts the low-order element of a 128-bit vector of [2 x double]
-///    into a 32-bit signed integer value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSD2SI / CVTSD2SI </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower 64 bits are used in the
-///    conversion.
-/// \returns A 32-bit signed integer containing the converted value.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_cvtsd_si32(__m128d __a)
-{
-  return __builtin_ia32_cvtsd2si((__v2df)__a);
-}
-
-/// Converts the lower double-precision floating-point element of a
-///    128-bit vector of [2 x double], in the second parameter, into a
-///    single-precision floating-point value, returned in the lower 32 bits of a
-///    128-bit vector of [4 x float]. The upper 96 bits of the result vector are
-///    copied from the upper 96 bits of the first parameter.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSD2SS / CVTSD2SS </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float]. The upper 96 bits of this parameter are
-///    copied to the upper 96 bits of the result.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower double-precision
-///    floating-point element is used in the conversion.
-/// \returns A 128-bit vector of [4 x float]. The lower 32 bits contain the
-///    converted value from the second parameter. The upper 96 bits are copied
-///    from the upper 96 bits of the first parameter.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cvtsd_ss(__m128 __a, __m128d __b)
-{
-  return (__m128)__builtin_ia32_cvtsd2ss((__v4sf)__a, (__v2df)__b);
-}
-
-/// Converts a 32-bit signed integer value, in the second parameter, into
-///    a double-precision floating-point value, returned in the lower 64 bits of
-///    a 128-bit vector of [2 x double]. The upper 64 bits of the result vector
-///    are copied from the upper 64 bits of the first parameter.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSI2SD / CVTSI2SD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The upper 64 bits of this parameter are
-///    copied to the upper 64 bits of the result.
-/// \param __b
-///    A 32-bit signed integer containing the value to be converted.
-/// \returns A 128-bit vector of [2 x double]. The lower 64 bits contain the
-///    converted value from the second parameter. The upper 64 bits are copied
-///    from the upper 64 bits of the first parameter.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtsi32_sd(__m128d __a, int __b)
-{
-  __a[0] = __b;
-  return __a;
-}
-
-/// Converts the lower single-precision floating-point element of a
-///    128-bit vector of [4 x float], in the second parameter, into a
-///    double-precision floating-point value, returned in the lower 64 bits of
-///    a 128-bit vector of [2 x double]. The upper 64 bits of the result vector
-///    are copied from the upper 64 bits of the first parameter.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSS2SD / CVTSS2SD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The upper 64 bits of this parameter are
-///    copied to the upper 64 bits of the result.
-/// \param __b
-///    A 128-bit vector of [4 x float]. The lower single-precision
-///    floating-point element is used in the conversion.
-/// \returns A 128-bit vector of [2 x double]. The lower 64 bits contain the
-///    converted value from the second parameter. The upper 64 bits are copied
-///    from the upper 64 bits of the first parameter.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtss_sd(__m128d __a, __m128 __b)
-{
-  __a[0] = __b[0];
-  return __a;
-}
-
-/// Converts the two double-precision floating-point elements of a
-///    128-bit vector of [2 x double] into two signed 32-bit integer values,
-///    returned in the lower 64 bits of a 128-bit vector of [4 x i32].
-///
-///    If the result of either conversion is inexact, the result is truncated
-///    (rounded towards zero) regardless of the current MXCSR setting. The upper
-///    64 bits of the result vector are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTTPD2DQ / CVTTPD2DQ </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \returns A 128-bit vector of [4 x i32] whose lower 64 bits contain the
-///    converted values. The upper 64 bits are set to zero.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvttpd_epi32(__m128d __a)
-{
-  return (__m128i)__builtin_ia32_cvttpd2dq((__v2df)__a);
-}
-
-/// Converts the low-order element of a [2 x double] vector into a 32-bit
-///    signed integer value, truncating the result when it is inexact.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTTSD2SI / CVTTSD2SI </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower 64 bits are used in the
-///    conversion.
-/// \returns A 32-bit signed integer containing the converted value.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_cvttsd_si32(__m128d __a)
-{
-  return __builtin_ia32_cvttsd2si((__v2df)__a);
-}
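-
-/* Usage sketch (added illustration, not part of the original header): the
-   plain conversions round according to the current MXCSR rounding mode
-   (round-to-nearest-even by default), while the "tt" variants always
-   truncate toward zero.
-
-     __m128d __v = _mm_set_sd(2.7);
-     int __r = _mm_cvtsd_si32(__v);    // 3 under the default rounding mode
-     int __t = _mm_cvttsd_si32(__v);   // 2, truncated regardless of MXCSR
-*/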
-
-/// Converts the two double-precision floating-point elements of a
-///    128-bit vector of [2 x double] into two signed 32-bit integer values,
-///    returned in a 64-bit vector of [2 x i32].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTPD2PI </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \returns A 64-bit vector of [2 x i32] containing the converted values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_cvtpd_pi32(__m128d __a)
-{
-  return (__m64)__builtin_ia32_cvtpd2pi((__v2df)__a);
-}
-
-/// Converts the two double-precision floating-point elements of a
-///    128-bit vector of [2 x double] into two signed 32-bit integer values,
-///    returned in a 64-bit vector of [2 x i32].
-///
-///    If the result of either conversion is inexact, the result is truncated
-///    (rounded towards zero) regardless of the current MXCSR setting.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTTPD2PI </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double].
-/// \returns A 64-bit vector of [2 x i32] containing the converted values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_cvttpd_pi32(__m128d __a)
-{
-  return (__m64)__builtin_ia32_cvttpd2pi((__v2df)__a);
-}
-
-/// Converts the two signed 32-bit integer elements of a 64-bit vector of
-///    [2 x i32] into two double-precision floating-point values, returned in a
-///    128-bit vector of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CVTPI2PD </c> instruction.
-///
-/// \param __a
-///    A 64-bit vector of [2 x i32].
-/// \returns A 128-bit vector of [2 x double] containing the converted values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS_MMX
-_mm_cvtpi32_pd(__m64 __a)
-{
-  return __builtin_ia32_cvtpi2pd((__v2si)__a);
-}
-
-/// Returns the low-order element of a 128-bit vector of [2 x double] as
-///    a double-precision floating-point value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower 64 bits are returned.
-/// \returns A double-precision floating-point value copied from the lower 64
-///    bits of \a __a.
-static __inline__ double __DEFAULT_FN_ATTRS
-_mm_cvtsd_f64(__m128d __a)
-{
-  return __a[0];
-}
-
-/// Loads a 128-bit floating-point vector of [2 x double] from an aligned
-///    memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVAPD / MOVAPD </c> instruction.
-///
-/// \param __dp
-///    A pointer to a 128-bit memory location. The address of the memory
-///    location has to be 16-byte aligned.
-/// \returns A 128-bit vector of [2 x double] containing the loaded values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_load_pd(double const *__dp)
-{
-  return *(__m128d*)__dp;
-}
-
-/// Loads a double-precision floating-point value from a specified memory
-///    location and duplicates it to both vector elements of a 128-bit vector of
-///    [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVDDUP / MOVDDUP </c> instruction.
-///
-/// \param __dp
-///    A pointer to a memory location containing a double-precision value.
-/// \returns A 128-bit vector of [2 x double] containing the loaded and
-///    duplicated values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_load1_pd(double const *__dp)
-{
-  struct __mm_load1_pd_struct {
-    double __u;
-  } __attribute__((__packed__, __may_alias__));
-  double __u = ((struct __mm_load1_pd_struct*)__dp)->__u;
-  return __extension__ (__m128d){ __u, __u };
-}
-
-#define        _mm_load_pd1(dp)        _mm_load1_pd(dp)
-
-/// Loads two double-precision values, in reverse order, from an aligned
-///    memory location into a 128-bit vector of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVAPD / MOVAPD </c> instruction +
-/// needed shuffling instructions. In AVX mode, the shuffling may be combined
-/// with the \c VMOVAPD, resulting in only a \c VPERMILPD instruction.
-///
-/// \param __dp
-///    A 16-byte aligned pointer to an array of double-precision values to be
-///    loaded in reverse order.
-/// \returns A 128-bit vector of [2 x double] containing the reversed loaded
-///    values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_loadr_pd(double const *__dp)
-{
-  __m128d __u = *(__m128d*)__dp;
-  return __builtin_shufflevector((__v2df)__u, (__v2df)__u, 1, 0);
-}
-
-/// Loads a 128-bit floating-point vector of [2 x double] from an
-///    unaligned memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVUPD / MOVUPD </c> instruction.
-///
-/// \param __dp
-///    A pointer to a 128-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \returns A 128-bit vector of [2 x double] containing the loaded values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_loadu_pd(double const *__dp)
-{
-  struct __loadu_pd {
-    __m128d_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((struct __loadu_pd*)__dp)->__v;
-}
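-
-/* Usage sketch (added illustration, not part of the original header):
-   _mm_load_pd requires a 16-byte aligned pointer, _mm_loadu_pd does not.
-   The buffer name below is hypothetical.
-
-     double __buf[2] = { 1.0, 2.0 };      // alignment not guaranteed
-     __m128d __u = _mm_loadu_pd(__buf);   // always valid
-     // _mm_load_pd(__buf) is only valid if __buf happens to be 16-byte aligned
-*/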
-
-/// Loads a 64-bit integer value to the low element of a 128-bit integer
-///    vector and clears the upper element.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
-///
-/// \param __a
-///    A pointer to a 64-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \returns A 128-bit vector of [2 x i64] containing the loaded value.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadu_si64(void const *__a)
-{
-  struct __loadu_si64 {
-    long long __v;
-  } __attribute__((__packed__, __may_alias__));
-  long long __u = ((struct __loadu_si64*)__a)->__v;
-  return __extension__ (__m128i)(__v2di){__u, 0LL};
-}
-
-/// Loads a 32-bit integer value to the low element of a 128-bit integer
-///    vector and clears the upper elements.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVD / MOVD </c> instruction.
-///
-/// \param __a
-///    A pointer to a 32-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \returns A 128-bit vector of [4 x i32] containing the loaded value.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadu_si32(void const *__a)
-{
-  struct __loadu_si32 {
-    int __v;
-  } __attribute__((__packed__, __may_alias__));
-  int __u = ((struct __loadu_si32*)__a)->__v;
-  return __extension__ (__m128i)(__v4si){__u, 0, 0, 0};
-}
-
-/// Loads a 16-bit integer value to the low element of a 128-bit integer
-///    vector and clears the upper elements.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic does not correspond to a specific instruction.
-///
-/// \param __a
-///    A pointer to a 16-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \returns A 128-bit vector of [8 x i16] containing the loaded value.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadu_si16(void const *__a)
-{
-  struct __loadu_si16 {
-    short __v;
-  } __attribute__((__packed__, __may_alias__));
-  short __u = ((struct __loadu_si16*)__a)->__v;
-  return __extension__ (__m128i)(__v8hi){__u, 0, 0, 0, 0, 0, 0, 0};
-}
-
-/// Loads a 64-bit double-precision value to the low element of a 128-bit
-///    vector of [2 x double] and clears the upper element.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVSD / MOVSD </c> instruction.
-///
-/// \param __dp
-///    A pointer to a memory location containing a double-precision value.
-///    The address of the memory location does not have to be aligned.
-/// \returns A 128-bit vector of [2 x double] containing the loaded value.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_load_sd(double const *__dp)
-{
-  struct __mm_load_sd_struct {
-    double __u;
-  } __attribute__((__packed__, __may_alias__));
-  double __u = ((struct __mm_load_sd_struct*)__dp)->__u;
-  return __extension__ (__m128d){ __u, 0 };
-}
-
-/// Loads a double-precision value into the high-order bits of a 128-bit
-///    vector of [2 x double]. The low-order bits are copied from the low-order
-///    bits of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVHPD / MOVHPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. \n
-///    Bits [63:0] are written to bits [63:0] of the result.
-/// \param __dp
-///    A pointer to a 64-bit memory location containing a double-precision
-///    floating-point value that is loaded. The loaded value is written to bits
-///    [127:64] of the result. The address of the memory location does not have
-///    to be aligned.
-/// \returns A 128-bit vector of [2 x double] containing the moved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_loadh_pd(__m128d __a, double const *__dp)
-{
-  struct __mm_loadh_pd_struct {
-    double __u;
-  } __attribute__((__packed__, __may_alias__));
-  double __u = ((struct __mm_loadh_pd_struct*)__dp)->__u;
-  return __extension__ (__m128d){ __a[0], __u };
-}
-
-/// Loads a double-precision value into the low-order bits of a 128-bit
-///    vector of [2 x double]. The high-order bits are copied from the
-///    high-order bits of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVLPD / MOVLPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. \n
-///    Bits [127:64] are written to bits [127:64] of the result.
-/// \param __dp
-///    A pointer to a 64-bit memory location containing a double-precision
-///    floating-point value that is loaded. The loaded value is written to bits
-///    [63:0] of the result. The address of the memory location does not have to
-///    be aligned.
-/// \returns A 128-bit vector of [2 x double] containing the moved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_loadl_pd(__m128d __a, double const *__dp)
-{
-  struct __mm_loadl_pd_struct {
-    double __u;
-  } __attribute__((__packed__, __may_alias__));
-  double __u = ((struct __mm_loadl_pd_struct*)__dp)->__u;
-  return __extension__ (__m128d){ __u, __a[1] };
-}
-
-/// Constructs a 128-bit floating-point vector of [2 x double] with
-///    unspecified content. This could be used as an argument to another
-///    intrinsic function where the argument is required but the value is not
-///    actually used.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \returns A 128-bit floating-point vector of [2 x double] with unspecified
-///    content.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_undefined_pd(void)
-{
-  return (__m128d)__builtin_ia32_undef128();
-}
-
-/// Constructs a 128-bit floating-point vector of [2 x double]. The lower
-///    64 bits of the vector are initialized with the specified double-precision
-///    floating-point value. The upper 64 bits are set to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
-///
-/// \param __w
-///    A double-precision floating-point value used to initialize the lower 64
-///    bits of the result.
-/// \returns An initialized 128-bit floating-point vector of [2 x double]. The
-///    lower 64 bits contain the value of the parameter. The upper 64 bits are
-///    set to zero.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_set_sd(double __w)
-{
-  return __extension__ (__m128d){ __w, 0 };
-}
-
-/// Constructs a 128-bit floating-point vector of [2 x double], with each
-///    of the two double-precision floating-point vector elements set to the
-///    specified double-precision floating-point value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVDDUP / MOVLHPS </c> instruction.
-///
-/// \param __w
-///    A double-precision floating-point value used to initialize each vector
-///    element of the result.
-/// \returns An initialized 128-bit floating-point vector of [2 x double].
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_set1_pd(double __w)
-{
-  return __extension__ (__m128d){ __w, __w };
-}
-
-/// Constructs a 128-bit floating-point vector of [2 x double], with each
-///    of the two double-precision floating-point vector elements set to the
-///    specified double-precision floating-point value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVDDUP / MOVLHPS </c> instruction.
-///
-/// \param __w
-///    A double-precision floating-point value used to initialize each vector
-///    element of the result.
-/// \returns An initialized 128-bit floating-point vector of [2 x double].
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_set_pd1(double __w)
-{
-  return _mm_set1_pd(__w);
-}
-
-/// Constructs a 128-bit floating-point vector of [2 x double]
-///    initialized with the specified double-precision floating-point values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUNPCKLPD / UNPCKLPD </c> instruction.
-///
-/// \param __w
-///    A double-precision floating-point value used to initialize the upper 64
-///    bits of the result.
-/// \param __x
-///    A double-precision floating-point value used to initialize the lower 64
-///    bits of the result.
-/// \returns An initialized 128-bit floating-point vector of [2 x double].
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_set_pd(double __w, double __x)
-{
-  return __extension__ (__m128d){ __x, __w };
-}
-
-/// Constructs a 128-bit floating-point vector of [2 x double],
-///    initialized in reverse order with the specified double-precision
-///    floating-point values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUNPCKLPD / UNPCKLPD </c> instruction.
-///
-/// \param __w
-///    A double-precision floating-point value used to initialize the lower 64
-///    bits of the result.
-/// \param __x
-///    A double-precision floating-point value used to initialize the upper 64
-///    bits of the result.
-/// \returns An initialized 128-bit floating-point vector of [2 x double].
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_setr_pd(double __w, double __x)
-{
-  return __extension__ (__m128d){ __w, __x };
-}
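-
-/* Usage sketch (added illustration, not part of the original header):
-   _mm_set_pd takes its arguments high element first, while _mm_setr_pd takes
-   them in low-to-high (memory) order, so the two calls below are equivalent.
-
-     __m128d __a = _mm_set_pd(2.0, 1.0);    // __a[0] = 1.0, __a[1] = 2.0
-     __m128d __b = _mm_setr_pd(1.0, 2.0);   // __b[0] = 1.0, __b[1] = 2.0
-*/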
-
-/// Constructs a 128-bit floating-point vector of [2 x double]
-///    initialized to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VXORPS / XORPS </c> instruction.
-///
-/// \returns An initialized 128-bit floating-point vector of [2 x double] with
-///    all elements set to zero.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_setzero_pd(void)
-{
-  return __extension__ (__m128d){ 0, 0 };
-}
-
-/// Constructs a 128-bit floating-point vector of [2 x double]. The lower
-///    64 bits are set to the lower 64 bits of the second parameter. The upper
-///    64 bits are set to the upper 64 bits of the first parameter.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VBLENDPD / BLENDPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The upper 64 bits are written to the
-///    upper 64 bits of the result.
-/// \param __b
-///    A 128-bit vector of [2 x double]. The lower 64 bits are written to the
-///    lower 64 bits of the result.
-/// \returns A 128-bit vector of [2 x double] containing the moved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_move_sd(__m128d __a, __m128d __b)
-{
-  __a[0] = __b[0];
-  return __a;
-}
-
-/// Stores the lower 64 bits of a 128-bit vector of [2 x double] to a
-///    memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVSD / MOVSD </c> instruction.
-///
-/// \param __dp
-///    A pointer to a 64-bit memory location.
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_sd(double *__dp, __m128d __a)
-{
-  struct __mm_store_sd_struct {
-    double __u;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __mm_store_sd_struct*)__dp)->__u = __a[0];
-}
-
-/// Moves packed double-precision values from a 128-bit vector of
-///    [2 x double] to a memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVAPD / MOVAPD </c> instruction.
-///
-/// \param __dp
-///    A pointer to an aligned memory location that can store two
-///    double-precision values.
-/// \param __a
-///    A packed 128-bit vector of [2 x double] containing the values to be
-///    moved.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_pd(double *__dp, __m128d __a)
-{
-  *(__m128d*)__dp = __a;
-}
-
-/// Moves the lower 64 bits of a 128-bit vector of [2 x double] twice to
-///    the upper and lower 64 bits of a memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the
-///   <c> VMOVDDUP + VMOVAPD / MOVLHPS + MOVAPS </c> instruction.
-///
-/// \param __dp
-///    A pointer to a memory location that can store two double-precision
-///    values.
-/// \param __a
-///    A 128-bit vector of [2 x double] whose lower 64 bits are copied to each
-///    of the values in \a __dp.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store1_pd(double *__dp, __m128d __a)
-{
-  __a = __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0);
-  _mm_store_pd(__dp, __a);
-}
-
-/// Moves the lower 64 bits of a 128-bit vector of [2 x double] twice to
-///    the upper and lower 64 bits of a memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the
-///   <c> VMOVDDUP + VMOVAPD / MOVLHPS + MOVAPS </c> instruction.
-///
-/// \param __dp
-///    A pointer to a memory location that can store two double-precision
-///    values.
-/// \param __a
-///    A 128-bit vector of [2 x double] whose lower 64 bits are copied to each
-///    of the values in \a __dp.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_pd1(double *__dp, __m128d __a)
-{
-  _mm_store1_pd(__dp, __a);
-}
-
-/// Stores a 128-bit vector of [2 x double] into an unaligned memory
-///    location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVUPD / MOVUPD </c> instruction.
-///
-/// \param __dp
-///    A pointer to a 128-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the values to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_pd(double *__dp, __m128d __a)
-{
-  struct __storeu_pd {
-    __m128d_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_pd*)__dp)->__v = __a;
-}
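-
-/* Usage sketch (added illustration, not part of the original header): storing
-   the full vector to an unaligned buffer versus storing only the low scalar.
-   The local names are hypothetical.
-
-     double __out[2], __lo;
-     __m128d __v = _mm_set_pd(2.0, 1.0);
-     _mm_storeu_pd(__out, __v);   // __out[0] = 1.0, __out[1] = 2.0
-     _mm_store_sd(&__lo, __v);    // __lo = 1.0
-*/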
-
-/// Stores two double-precision values, in reverse order, from a 128-bit
-///    vector of [2 x double] to a 16-byte aligned memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to a shuffling instruction followed by a
-/// <c> VMOVAPD / MOVAPD </c> instruction.
-///
-/// \param __dp
-///    A pointer to a 16-byte aligned memory location that can store two
-///    double-precision values.
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the values to be reversed and
-///    stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storer_pd(double *__dp, __m128d __a)
-{
-  __a = __builtin_shufflevector((__v2df)__a, (__v2df)__a, 1, 0);
-  *(__m128d *)__dp = __a;
-}
-
-/// Stores the upper 64 bits of a 128-bit vector of [2 x double] to a
-///    memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVHPD / MOVHPD </c> instruction.
-///
-/// \param __dp
-///    A pointer to a 64-bit memory location.
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeh_pd(double *__dp, __m128d __a)
-{
-  struct __mm_storeh_pd_struct {
-    double __u;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __mm_storeh_pd_struct*)__dp)->__u = __a[1];
-}
-
-/// Stores the lower 64 bits of a 128-bit vector of [2 x double] to a
-///    memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVLPD / MOVLPD </c> instruction.
-///
-/// \param __dp
-///    A pointer to a 64-bit memory location.
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storel_pd(double *__dp, __m128d __a)
-{
-  struct __mm_storeh_pd_struct {
-    double __u;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __mm_storeh_pd_struct*)__dp)->__u = __a[0];
-}
-
-/// Adds the corresponding elements of two 128-bit vectors of [16 x i8],
-///    saving the lower 8 bits of each sum in the corresponding element of a
-///    128-bit result vector of [16 x i8].
-///
-///    The integer elements of both parameters can be either signed or unsigned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPADDB / PADDB </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [16 x i8].
-/// \param __b
-///    A 128-bit vector of [16 x i8].
-/// \returns A 128-bit vector of [16 x i8] containing the sums of both
-///    parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_add_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v16qu)__a + (__v16qu)__b);
-}
-
-/// Adds the corresponding elements of two 128-bit vectors of [8 x i16],
-///    saving the lower 16 bits of each sum in the corresponding element of a
-///    128-bit result vector of [8 x i16].
-///
-///    The integer elements of both parameters can be either signed or unsigned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPADDW / PADDW </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [8 x i16].
-/// \param __b
-///    A 128-bit vector of [8 x i16].
-/// \returns A 128-bit vector of [8 x i16] containing the sums of both
-///    parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_add_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v8hu)__a + (__v8hu)__b);
-}
-
-/// Adds the corresponding elements of two 128-bit vectors of [4 x i32],
-///    saving the lower 32 bits of each sum in the corresponding element of a
-///    128-bit result vector of [4 x i32].
-///
-///    The integer elements of both parameters can be either signed or unsigned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPADDD / PADDD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x i32].
-/// \param __b
-///    A 128-bit vector of [4 x i32].
-/// \returns A 128-bit vector of [4 x i32] containing the sums of both
-///    parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_add_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v4su)__a + (__v4su)__b);
-}
-
-/// Adds two signed or unsigned 64-bit integer values, returning the
-///    lower 64 bits of the sum.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> PADDQ </c> instruction.
-///
-/// \param __a
-///    A 64-bit integer.
-/// \param __b
-///    A 64-bit integer.
-/// \returns A 64-bit integer containing the sum of both parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_add_si64(__m64 __a, __m64 __b)
-{
-  return (__m64)__builtin_ia32_paddq((__v1di)__a, (__v1di)__b);
-}
-
-/// Adds the corresponding elements of two 128-bit vectors of [2 x i64],
-///    saving the lower 64 bits of each sum in the corresponding element of a
-///    128-bit result vector of [2 x i64].
-///
-///    The integer elements of both parameters can be either signed or unsigned.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPADDQ / PADDQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x i64].
-/// \param __b
-///    A 128-bit vector of [2 x i64].
-/// \returns A 128-bit vector of [2 x i64] containing the sums of both
-///    parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_add_epi64(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v2du)__a + (__v2du)__b);
-}
-
-/// Adds, with saturation, the corresponding elements of two 128-bit
-///    signed [16 x i8] vectors, saving each sum in the corresponding element of
-///    a 128-bit result vector of [16 x i8]. Positive sums greater than 0x7F are
-///    saturated to 0x7F. Negative sums less than 0x80 are saturated to 0x80.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPADDSB / PADDSB </c> instruction.
-///
-/// \param __a
-///    A 128-bit signed [16 x i8] vector.
-/// \param __b
-///    A 128-bit signed [16 x i8] vector.
-/// \returns A 128-bit signed [16 x i8] vector containing the saturated sums of
-///    both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_adds_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_paddsb128((__v16qi)__a, (__v16qi)__b);
-}
-
-/// Adds, with saturation, the corresponding elements of two 128-bit
-///    signed [8 x i16] vectors, saving each sum in the corresponding element of
-///    a 128-bit result vector of [8 x i16]. Positive sums greater than 0x7FFF
-///    are saturated to 0x7FFF. Negative sums less than 0x8000 are saturated to
-///    0x8000.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPADDSW / PADDSW </c> instruction.
-///
-/// \param __a
-///    A 128-bit signed [8 x i16] vector.
-/// \param __b
-///    A 128-bit signed [8 x i16] vector.
-/// \returns A 128-bit signed [8 x i16] vector containing the saturated sums of
-///    both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_adds_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_paddsw128((__v8hi)__a, (__v8hi)__b);
-}
-
-/// Adds, with saturation, the corresponding elements of two 128-bit
-///    unsigned [16 x i8] vectors, saving each sum in the corresponding element
-///    of a 128-bit result vector of [16 x i8]. Positive sums greater than 0xFF
-///    are saturated to 0xFF. Negative sums are saturated to 0x00.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPADDUSB / PADDUSB </c> instruction.
-///
-/// \param __a
-///    A 128-bit unsigned [16 x i8] vector.
-/// \param __b
-///    A 128-bit unsigned [16 x i8] vector.
-/// \returns A 128-bit unsigned [16 x i8] vector containing the saturated sums
-///    of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_adds_epu8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_paddusb128((__v16qi)__a, (__v16qi)__b);
-}
-
-/// Adds, with saturation, the corresponding elements of two 128-bit
-///    unsigned [8 x i16] vectors, saving each sum in the corresponding element
-///    of a 128-bit result vector of [8 x i16]. Positive sums greater than
-///    0xFFFF are saturated to 0xFFFF. Negative sums are saturated to 0x0000.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPADDUSW / PADDUSW </c> instruction.
-///
-/// \param __a
-///    A 128-bit unsigned [8 x i16] vector.
-/// \param __b
-///    A 128-bit unsigned [8 x i16] vector.
-/// \returns A 128-bit unsigned [8 x i16] vector containing the saturated sums
-///    of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_adds_epu16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_paddusw128((__v8hi)__a, (__v8hi)__b);
-}
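-
-/* Usage sketch (added illustration, not part of the original header): the
-   saturating adds clamp to the type's range instead of wrapping the way the
-   plain _mm_add_epi8/16 forms do.
-
-     __m128i __x = _mm_set1_epi8((char)250);
-     __m128i __y = _mm_set1_epi8((char)10);
-     __m128i __w = _mm_add_epi8(__x, __y);    // each byte wraps to 0x04
-     __m128i __s = _mm_adds_epu8(__x, __y);   // each byte saturates to 0xFF
-*/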
-
-/// Computes the rounded averages of corresponding elements of two
-///    128-bit unsigned [16 x i8] vectors, saving each result in the
-///    corresponding element of a 128-bit result vector of [16 x i8].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPAVGB / PAVGB </c> instruction.
-///
-/// \param __a
-///    A 128-bit unsigned [16 x i8] vector.
-/// \param __b
-///    A 128-bit unsigned [16 x i8] vector.
-/// \returns A 128-bit unsigned [16 x i8] vector containing the rounded
-///    averages of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_avg_epu8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pavgb128((__v16qi)__a, (__v16qi)__b);
-}
-
-/// Computes the rounded averages of corresponding elements of two
-///    128-bit unsigned [8 x i16] vectors, saving each result in the
-///    corresponding element of a 128-bit result vector of [8 x i16].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPAVGW / PAVGW </c> instruction.
-///
-/// \param __a
-///    A 128-bit unsigned [8 x i16] vector.
-/// \param __b
-///    A 128-bit unsigned [8 x i16] vector.
-/// \returns A 128-bit unsigned [8 x i16] vector containing the rounded
-///    averages of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_avg_epu16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pavgw128((__v8hi)__a, (__v8hi)__b);
-}
-
-/// Multiplies the corresponding elements of two 128-bit signed [8 x i16]
-///    vectors, producing eight intermediate 32-bit signed integer products, and
-///    adds the consecutive pairs of 32-bit products to form a 128-bit signed
-///    [4 x i32] vector.
-///
-///    For example, bits [15:0] of both parameters are multiplied producing a
-///    32-bit product, bits [31:16] of both parameters are multiplied producing
-///    a 32-bit product, and the sum of those two products becomes bits [31:0]
-///    of the result.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMADDWD / PMADDWD </c> instruction.
-///
-/// \param __a
-///    A 128-bit signed [8 x i16] vector.
-/// \param __b
-///    A 128-bit signed [8 x i16] vector.
-/// \returns A 128-bit signed [4 x i32] vector containing the sums of products
-///    of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_madd_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pmaddwd128((__v8hi)__a, (__v8hi)__b);
-}
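-
-/* Usage sketch (added illustration, not part of the original header): each
-   32-bit element of the _mm_madd_epi16 result is the sum of two adjacent
-   16-bit products.
-
-     __m128i __a = _mm_set1_epi16(2);
-     __m128i __b = _mm_set1_epi16(3);
-     __m128i __r = _mm_madd_epi16(__a, __b);  // every i32 element = 2*3 + 2*3 = 12
-*/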
-
-/// Compares corresponding elements of two 128-bit signed [8 x i16]
-///    vectors, saving the greater value from each comparison in the
-///    corresponding element of a 128-bit result vector of [8 x i16].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMAXSW / PMAXSW </c> instruction.
-///
-/// \param __a
-///    A 128-bit signed [8 x i16] vector.
-/// \param __b
-///    A 128-bit signed [8 x i16] vector.
-/// \returns A 128-bit signed [8 x i16] vector containing the greater value of
-///    each comparison.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_max_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pmaxsw128((__v8hi)__a, (__v8hi)__b);
-}
-
-/// Compares corresponding elements of two 128-bit unsigned [16 x i8]
-///    vectors, saving the greater value from each comparison in the
-///    corresponding element of a 128-bit result vector of [16 x i8].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMAXUB / PMAXUB </c> instruction.
-///
-/// \param __a
-///    A 128-bit unsigned [16 x i8] vector.
-/// \param __b
-///    A 128-bit unsigned [16 x i8] vector.
-/// \returns A 128-bit unsigned [16 x i8] vector containing the greater value of
-///    each comparison.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_max_epu8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pmaxub128((__v16qi)__a, (__v16qi)__b);
-}
-
-/// Compares corresponding elements of two 128-bit signed [8 x i16]
-///    vectors, saving the smaller value from each comparison in the
-///    corresponding element of a 128-bit result vector of [8 x i16].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMINSW / PMINSW </c> instruction.
-///
-/// \param __a
-///    A 128-bit signed [8 x i16] vector.
-/// \param __b
-///    A 128-bit signed [8 x i16] vector.
-/// \returns A 128-bit signed [8 x i16] vector containing the smaller value of
-///    each comparison.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_min_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pminsw128((__v8hi)__a, (__v8hi)__b);
-}
-
-/// Compares corresponding elements of two 128-bit unsigned [16 x i8]
-///    vectors, saving the smaller value from each comparison in the
-///    corresponding element of a 128-bit result vector of [16 x i8].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMINUB / PMINUB </c> instruction.
-///
-/// \param __a
-///    A 128-bit unsigned [16 x i8] vector.
-/// \param __b
-///    A 128-bit unsigned [16 x i8] vector.
-/// \returns A 128-bit unsigned [16 x i8] vector containing the smaller value of
-///    each comparison.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_min_epu8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pminub128((__v16qi)__a, (__v16qi)__b);
-}
-
-/// Multiplies the corresponding elements of two signed [8 x i16]
-///    vectors, saving the upper 16 bits of each 32-bit product in the
-///    corresponding element of a 128-bit signed [8 x i16] result vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMULHW / PMULHW </c> instruction.
-///
-/// \param __a
-///    A 128-bit signed [8 x i16] vector.
-/// \param __b
-///    A 128-bit signed [8 x i16] vector.
-/// \returns A 128-bit signed [8 x i16] vector containing the upper 16 bits of
-///    each of the eight 32-bit products.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mulhi_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pmulhw128((__v8hi)__a, (__v8hi)__b);
-}
-
-/// Multiplies the corresponding elements of two unsigned [8 x i16]
-///    vectors, saving the upper 16 bits of each 32-bit product in the
-///    corresponding element of a 128-bit unsigned [8 x i16] result vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMULHUW / PMULHUW </c> instruction.
-///
-/// \param __a
-///    A 128-bit unsigned [8 x i16] vector.
-/// \param __b
-///    A 128-bit unsigned [8 x i16] vector.
-/// \returns A 128-bit unsigned [8 x i16] vector containing the upper 16 bits
-///    of each of the eight 32-bit products.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mulhi_epu16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_pmulhuw128((__v8hi)__a, (__v8hi)__b);
-}
-
-/// Multiplies the corresponding elements of two signed [8 x i16]
-///    vectors, saving the lower 16 bits of each 32-bit product in the
-///    corresponding element of a 128-bit signed [8 x i16] result vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMULLW / PMULLW </c> instruction.
-///
-/// \param __a
-///    A 128-bit signed [8 x i16] vector.
-/// \param __b
-///    A 128-bit signed [8 x i16] vector.
-/// \returns A 128-bit signed [8 x i16] vector containing the lower 16 bits of
-///    each of the eight 32-bit products.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mullo_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v8hu)__a * (__v8hu)__b);
-}
-
-/// Multiplies 32-bit unsigned integer values contained in the lower bits
-///    of the two 64-bit integer vectors and returns the 64-bit unsigned
-///    product.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> PMULUDQ </c> instruction.
-///
-/// \param __a
-///    A 64-bit integer containing one of the source operands.
-/// \param __b
-///    A 64-bit integer containing one of the source operands.
-/// \returns A 64-bit integer vector containing the product of both operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_mul_su32(__m64 __a, __m64 __b)
-{
-  return __builtin_ia32_pmuludq((__v2si)__a, (__v2si)__b);
-}
-
-/// Multiplies 32-bit unsigned integer values contained in the lower
-///    bits of the corresponding elements of two [2 x i64] vectors, and returns
-///    the 64-bit products in the corresponding elements of a [2 x i64] vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMULUDQ / PMULUDQ </c> instruction.
-///
-/// \param __a
-///    A [2 x i64] vector containing one of the source operands.
-/// \param __b
-///    A [2 x i64] vector containing one of the source operands.
-/// \returns A [2 x i64] vector containing the product of both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mul_epu32(__m128i __a, __m128i __b)
-{
-  return __builtin_ia32_pmuludq128((__v4si)__a, (__v4si)__b);
-}
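-
-/* Usage sketch (added illustration, not part of the original header):
-   _mm_mul_epu32 reads only elements 0 and 2 of each [4 x i32] operand and
-   produces full 64-bit products.
-
-     __m128i __a = _mm_set_epi32(9, 40000, 9, 3);   // low to high: 3, 9, 40000, 9
-     __m128i __b = _mm_set_epi32(9, 70000, 9, 5);   // low to high: 5, 9, 70000, 9
-     __m128i __p = _mm_mul_epu32(__a, __b);
-     // __p = { 3 * 5, 40000 * 70000 } = { 15, 2800000000 } as two u64 values
-*/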
-
-/// Computes the absolute differences of corresponding 8-bit integer
-///    values in two 128-bit vectors. Sums the first 8 absolute differences, and
-///    separately sums the second 8 absolute differences. Packs these two
-///    unsigned 16-bit integer sums into the upper and lower elements of a
-///    [2 x i64] vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSADBW / PSADBW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing one of the source operands.
-/// \param __b
-///    A 128-bit integer vector containing one of the source operands.
-/// \returns A [2 x i64] vector containing the sums of the sets of absolute
-///    differences between both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sad_epu8(__m128i __a, __m128i __b)
-{
-  return __builtin_ia32_psadbw128((__v16qi)__a, (__v16qi)__b);
-}
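-
-/* Usage sketch (added illustration, not part of the original header): with an
-   all-zero second operand, _mm_sad_epu8 reduces each 8-byte half to the sum
-   of its bytes, a common horizontal-sum trick.
-
-     __m128i __x = _mm_set1_epi8(1);
-     __m128i __r = _mm_sad_epu8(__x, _mm_setzero_si128());
-     // bits [15:0] and bits [79:64] of __r each hold the value 8
-*/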
-
-/// Subtracts the corresponding 8-bit integer values in the operands.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSUBB / PSUBB </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the minuends.
-/// \param __b
-///    A 128-bit integer vector containing the subtrahends.
-/// \returns A 128-bit integer vector containing the differences of the values
-///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sub_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v16qu)__a - (__v16qu)__b);
-}
-
-/// Subtracts the corresponding 16-bit integer values in the operands.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSUBW / PSUBW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the minuends.
-/// \param __b
-///    A 128-bit integer vector containing the subtrahends.
-/// \returns A 128-bit integer vector containing the differences of the values
-///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sub_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v8hu)__a - (__v8hu)__b);
-}
-
-/// Subtracts the corresponding 32-bit integer values in the operands.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSUBD / PSUBD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the minuends.
-/// \param __b
-///    A 128-bit integer vector containing the subtrahends.
-/// \returns A 128-bit integer vector containing the differences of the values
-///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sub_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v4su)__a - (__v4su)__b);
-}
-
-/// Subtracts signed or unsigned 64-bit integer values and writes the
-///    difference to the corresponding bits in the destination.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> PSUBQ </c> instruction.
-///
-/// \param __a
-///    A 64-bit integer vector containing the minuend.
-/// \param __b
-///    A 64-bit integer vector containing the subtrahend.
-/// \returns A 64-bit integer vector containing the difference of the values in
-///    the operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_sub_si64(__m64 __a, __m64 __b)
-{
-  return (__m64)__builtin_ia32_psubq((__v1di)__a, (__v1di)__b);
-}
-
-/// Subtracts the corresponding elements of two [2 x i64] vectors.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSUBQ / PSUBQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the minuends.
-/// \param __b
-///    A 128-bit integer vector containing the subtrahends.
-/// \returns A 128-bit integer vector containing the differences of the values
-///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sub_epi64(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v2du)__a - (__v2du)__b);
-}
-
-/// Subtracts corresponding 8-bit signed integer values in the input and
-///    returns the differences in the corresponding bytes in the destination.
-///    Differences greater than 0x7F are saturated to 0x7F, and differences less
-///    than 0x80 are saturated to 0x80.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSUBSB / PSUBSB </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the minuends.
-/// \param __b
-///    A 128-bit integer vector containing the subtrahends.
-/// \returns A 128-bit integer vector containing the differences of the values
-///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_subs_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_psubsb128((__v16qi)__a, (__v16qi)__b);
-}
-
-/// Subtracts corresponding 16-bit signed integer values in the input and
-///    returns the differences in the corresponding elements of the
-///    destination. Differences greater than 0x7FFF are saturated to 0x7FFF,
-///    and differences less than 0x8000 are saturated to 0x8000.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSUBSW / PSUBSW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the minuends.
-/// \param __b
-///    A 128-bit integer vector containing the subtrahends.
-/// \returns A 128-bit integer vector containing the differences of the values
-///    in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_subs_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_psubsw128((__v8hi)__a, (__v8hi)__b);
-}
-
-/// Subtracts corresponding 8-bit unsigned integer values in the input
-///    and returns the differences in the corresponding bytes in the
-///    destination. Differences less than 0x00 are saturated to 0x00.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSUBUSB / PSUBUSB </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the minuends.
-/// \param __b
-///    A 128-bit integer vector containing the subtrahends.
-/// \returns A 128-bit integer vector containing the unsigned integer
-///    differences of the values in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_subs_epu8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_psubusb128((__v16qi)__a, (__v16qi)__b);
-}
-
-/// Subtracts corresponding 16-bit unsigned integer values in the input
-///    and returns the differences in the corresponding elements of the
-///    destination. Differences less than 0x0000 are saturated to 0x0000.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSUBUSW / PSUBUSW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the minuends.
-/// \param __b
-///    A 128-bit integer vector containing the subtrahends.
-/// \returns A 128-bit integer vector containing the unsigned integer
-///    differences of the values in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_subs_epu16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_psubusw128((__v8hi)__a, (__v8hi)__b);
-}
-
-/// Performs a bitwise AND of two 128-bit integer vectors.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPAND / PAND </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing one of the source operands.
-/// \param __b
-///    A 128-bit integer vector containing one of the source operands.
-/// \returns A 128-bit integer vector containing the bitwise AND of the values
-///    in both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_and_si128(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v2du)__a & (__v2du)__b);
-}
-
-/// Performs a bitwise AND of two 128-bit integer vectors, using the
-///    one's complement of the values contained in the first source operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPANDN / PANDN </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector containing the left source operand. The one's complement
-///    of this value is used in the bitwise AND.
-/// \param __b
-///    A 128-bit vector containing the right source operand.
-/// \returns A 128-bit integer vector containing the bitwise AND of the one's
-///    complement of the first operand and the values in the second operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_andnot_si128(__m128i __a, __m128i __b)
-{
-  return (__m128i)(~(__v2du)__a & (__v2du)__b);
-}
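-
-/* Usage sketch (added illustration, not part of the original header): the
-   AND-NOT form is the usual building block of a bitwise select,
-   (mask & a) | (~mask & b); __mask, __a and __b below are hypothetical.
-
-     __m128i __sel = _mm_or_si128(_mm_and_si128(__mask, __a),
-                                  _mm_andnot_si128(__mask, __b));
-*/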
-
-/// Performs a bitwise OR of two 128-bit integer vectors.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPOR / POR </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing one of the source operands.
-/// \param __b
-///    A 128-bit integer vector containing one of the source operands.
-/// \returns A 128-bit integer vector containing the bitwise OR of the values
-///    in both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_or_si128(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v2du)__a | (__v2du)__b);
-}
-
-/// Performs a bitwise exclusive OR of two 128-bit integer vectors.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPXOR / PXOR </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing one of the source operands.
-/// \param __b
-///    A 128-bit integer vector containing one of the source operands.
-/// \returns A 128-bit integer vector containing the bitwise exclusive OR of the
-///    values in both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_xor_si128(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v2du)__a ^ (__v2du)__b);
-}
-
-/// Left-shifts the 128-bit integer vector operand by the specified
-///    number of bytes. Low-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128i _mm_slli_si128(__m128i a, const int imm);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPSLLDQ / PSLLDQ </c> instruction.
-///
-/// \param a
-///    A 128-bit integer vector containing the source operand.
-/// \param imm
-///    An immediate value specifying the number of bytes to left-shift operand
-///    \a a.
-/// \returns A 128-bit integer vector containing the left-shifted value.
-#define _mm_slli_si128(a, imm) \
-  (__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm))
-
-#define _mm_bslli_si128(a, imm) \
-  (__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm))
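-
-/* Usage sketch (added illustration, not part of the original header): the
-   byte shift moves whole lanes toward the high end of the vector and fills
-   the vacated low bytes with zero.
-
-     __m128i __v = _mm_set_epi32(4, 3, 2, 1);   // low to high: 1, 2, 3, 4
-     __m128i __r = _mm_slli_si128(__v, 4);      // low to high: 0, 1, 2, 3
-*/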
-
-/// Left-shifts each 16-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. Low-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSLLW / PSLLW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    An integer value specifying the number of bits to left-shift each value
-///    in operand \a __a.
-/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_slli_epi16(__m128i __a, int __count)
-{
-  return (__m128i)__builtin_ia32_psllwi128((__v8hi)__a, __count);
-}
-
-/// Left-shifts each 16-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. Low-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSLLW / PSLLW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    A 128-bit integer vector in which bits [63:0] specify the number of bits
-///    to left-shift each value in operand \a __a.
-/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sll_epi16(__m128i __a, __m128i __count)
-{
-  return (__m128i)__builtin_ia32_psllw128((__v8hi)__a, (__v8hi)__count);
-}
-
-/// Left-shifts each 32-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. Low-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSLLD / PSLLD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    An integer value specifying the number of bits to left-shift each value
-///    in operand \a __a.
-/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_slli_epi32(__m128i __a, int __count)
-{
-  return (__m128i)__builtin_ia32_pslldi128((__v4si)__a, __count);
-}
-
-/// Left-shifts each 32-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. Low-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSLLD / PSLLD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    A 128-bit integer vector in which bits [63:0] specify the number of bits
-///    to left-shift each value in operand \a __a.
-/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sll_epi32(__m128i __a, __m128i __count)
-{
-  return (__m128i)__builtin_ia32_pslld128((__v4si)__a, (__v4si)__count);
-}
-
-/// Left-shifts each 64-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. Low-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSLLQ / PSLLQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    An integer value specifying the number of bits to left-shift each value
-///    in operand \a __a.
-/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_slli_epi64(__m128i __a, int __count)
-{
-  return __builtin_ia32_psllqi128((__v2di)__a, __count);
-}
-
-/// Left-shifts each 64-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. Low-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSLLQ / PSLLQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    A 128-bit integer vector in which bits [63:0] specify the number of bits
-///    to left-shift each value in operand \a __a.
-/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sll_epi64(__m128i __a, __m128i __count)
-{
-  return __builtin_ia32_psllq128((__v2di)__a, (__v2di)__count);
-}
-
-/// Right-shifts each 16-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. High-order bits are filled with the sign
-///    bit of the initial value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRAW / PSRAW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    An integer value specifying the number of bits to right-shift each value
-///    in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srai_epi16(__m128i __a, int __count)
-{
-  return (__m128i)__builtin_ia32_psrawi128((__v8hi)__a, __count);
-}
-
-/// Right-shifts each 16-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. High-order bits are filled with the sign
-///    bit of the initial value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRAW / PSRAW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    A 128-bit integer vector in which bits [63:0] specify the number of bits
-///    to right-shift each value in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sra_epi16(__m128i __a, __m128i __count)
-{
-  return (__m128i)__builtin_ia32_psraw128((__v8hi)__a, (__v8hi)__count);
-}
-
-/// Right-shifts each 32-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. High-order bits are filled with the sign
-///    bit of the initial value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRAD / PSRAD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    An integer value specifying the number of bits to right-shift each value
-///    in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srai_epi32(__m128i __a, int __count)
-{
-  return (__m128i)__builtin_ia32_psradi128((__v4si)__a, __count);
-}
-
-/// Right-shifts each 32-bit value in the 128-bit integer vector operand
-///    by the specified number of bits. High-order bits are filled with the sign
-///    bit of the initial value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRAD / PSRAD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    A 128-bit integer vector in which bits [63:0] specify the number of bits
-///    to right-shift each value in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sra_epi32(__m128i __a, __m128i __count)
-{
-  return (__m128i)__builtin_ia32_psrad128((__v4si)__a, (__v4si)__count);
-}
-
-/// Right-shifts the 128-bit integer vector operand by the specified
-///    number of bytes. High-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128i _mm_srli_si128(__m128i a, const int imm);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPSRLDQ / PSRLDQ </c> instruction.
-///
-/// \param a
-///    A 128-bit integer vector containing the source operand.
-/// \param imm
-///    An immediate value specifying the number of bytes to right-shift operand
-///    \a a.
-/// \returns A 128-bit integer vector containing the right-shifted value.
-#define _mm_srli_si128(a, imm) \
-  (__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm))
-
-#define _mm_bsrli_si128(a, imm) \
-  (__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm))
-
-/// Right-shifts each of the 16-bit values in the 128-bit integer vector
-///    operand by the specified number of bits. High-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRLW / PSRLW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    An integer value specifying the number of bits to right-shift each value
-///    in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srli_epi16(__m128i __a, int __count)
-{
-  return (__m128i)__builtin_ia32_psrlwi128((__v8hi)__a, __count);
-}
-
-/// Right-shifts each of the 16-bit values in the 128-bit integer vector
-///    operand by the specified number of bits. High-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRLW / PSRLW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    A 128-bit integer vector in which bits [63:0] specify the number of bits
-///    to right-shift each value in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srl_epi16(__m128i __a, __m128i __count)
-{
-  return (__m128i)__builtin_ia32_psrlw128((__v8hi)__a, (__v8hi)__count);
-}
-
-/// Right-shifts each of the 32-bit values in the 128-bit integer vector
-///    operand by the specified number of bits. High-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRLD / PSRLD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    An integer value specifying the number of bits to right-shift each value
-///    in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srli_epi32(__m128i __a, int __count)
-{
-  return (__m128i)__builtin_ia32_psrldi128((__v4si)__a, __count);
-}
-
-/// Right-shifts each of the 32-bit values in the 128-bit integer vector
-///    operand by the specified number of bits. High-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRLD / PSRLD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    A 128-bit integer vector in which bits [63:0] specify the number of bits
-///    to right-shift each value in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srl_epi32(__m128i __a, __m128i __count)
-{
-  return (__m128i)__builtin_ia32_psrld128((__v4si)__a, (__v4si)__count);
-}
-
-/// Right-shifts each of the 64-bit values in the 128-bit integer vector
-///    operand by the specified number of bits. High-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRLQ / PSRLQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    An integer value specifying the number of bits to right-shift each value
-///    in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srli_epi64(__m128i __a, int __count)
-{
-  return __builtin_ia32_psrlqi128((__v2di)__a, __count);
-}
-
-/// Right-shifts each of the 64-bit values in the 128-bit integer vector
-///    operand by the specified number of bits. High-order bits are cleared.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPSRLQ / PSRLQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the source operand.
-/// \param __count
-///    A 128-bit integer vector in which bits [63:0] specify the number of bits
-///    to right-shift each value in operand \a __a.
-/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srl_epi64(__m128i __a, __m128i __count)
-{
-  return __builtin_ia32_psrlq128((__v2di)__a, (__v2di)__count);
-}
-
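-/* Editorial usage sketch, not part of the original header: for negative
-   elements the arithmetic and logical right shifts above differ:
-   _mm_srai_epi32 copies the sign bit into the vacated positions while
-   _mm_srli_epi32 fills them with zeros. The helper name is illustrative
-   only. */
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-__example_signed_halve_epi32(__m128i __x)
-{
-  /* Signed divide-by-two of each 32-bit lane, rounding toward negative
-     infinity, via an arithmetic shift. */
-  return _mm_srai_epi32(__x, 1);
-}
-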
-/// Compares each of the corresponding 8-bit values of the 128-bit
-///    integer vectors for equality. Each comparison yields 0x0 for false, 0xFF
-///    for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPEQB / PCMPEQB </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpeq_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v16qi)__a == (__v16qi)__b);
-}
-
-/// Compares each of the corresponding 16-bit values of the 128-bit
-///    integer vectors for equality. Each comparison yields 0x0 for false,
-///    0xFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPEQW / PCMPEQW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpeq_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v8hi)__a == (__v8hi)__b);
-}
-
-/// Compares each of the corresponding 32-bit values of the 128-bit
-///    integer vectors for equality. Each comparison yields 0x0 for false,
-///    0xFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPEQD / PCMPEQD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpeq_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v4si)__a == (__v4si)__b);
-}
-
-/// Compares each of the corresponding signed 8-bit values of the 128-bit
-///    integer vectors to determine if the values in the first operand are
-///    greater than those in the second operand. Each comparison yields 0x0 for
-///    false, 0xFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPGTB / PCMPGTB </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpgt_epi8(__m128i __a, __m128i __b)
-{
-  /* This function always performs a signed comparison, but __v16qi is a char
-     which may be signed or unsigned, so use __v16qs. */
-  return (__m128i)((__v16qs)__a > (__v16qs)__b);
-}
-
-/// Compares each of the corresponding signed 16-bit values of the
-///    128-bit integer vectors to determine if the values in the first operand
-///    are greater than those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPGTW / PCMPGTW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpgt_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v8hi)__a > (__v8hi)__b);
-}
-
-/// Compares each of the corresponding signed 32-bit values of the
-///    128-bit integer vectors to determine if the values in the first operand
-///    are greater than those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPGTD / PCMPGTD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpgt_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)((__v4si)__a > (__v4si)__b);
-}
-
-/// Compares each of the corresponding signed 8-bit values of the 128-bit
-///    integer vectors to determine if the values in the first operand are less
-///    than those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPGTB / PCMPGTB </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmplt_epi8(__m128i __a, __m128i __b)
-{
-  return _mm_cmpgt_epi8(__b, __a);
-}
-
-/// Compares each of the corresponding signed 16-bit values of the
-///    128-bit integer vectors to determine if the values in the first operand
-///    are less than those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPGTW / PCMPGTW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmplt_epi16(__m128i __a, __m128i __b)
-{
-  return _mm_cmpgt_epi16(__b, __a);
-}
-
-/// Compares each of the corresponding signed 32-bit values of the
-///    128-bit integer vectors to determine if the values in the first operand
-///    are less than those in the second operand.
-///
-///    Each comparison yields 0x0 for false, 0xFFFFFFFF for true.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPCMPGTD / PCMPGTD </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __b
-///    A 128-bit integer vector.
-/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmplt_epi32(__m128i __a, __m128i __b)
-{
-  return _mm_cmpgt_epi32(__b, __a);
-}
-
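-/* Editorial usage sketch, not part of the original header: the comparisons
-   above return per-element masks of all ones or all zeros, which combine
-   with the logic intrinsics to give a branchless signed maximum of 32-bit
-   lanes (SSE2 itself has no pmaxsd). The helper name is illustrative only. */
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-__example_max_epi32(__m128i __a, __m128i __b)
-{
-  __m128i __gt = _mm_cmpgt_epi32(__a, __b);         /* all ones where a > b */
-  return _mm_or_si128(_mm_and_si128(__gt, __a),     /* take a where a > b   */
-                      _mm_andnot_si128(__gt, __b)); /* otherwise take b     */
-}
-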
-#ifdef __x86_64__
-/// Converts a 64-bit signed integer value from the second operand into a
-///    double-precision value and returns it in the lower element of a [2 x
-///    double] vector; the upper element of the returned vector is copied from
-///    the upper element of the first operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSI2SD / CVTSI2SD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The upper 64 bits of this operand are
-///    copied to the upper 64 bits of the destination.
-/// \param __b
-///    A 64-bit signed integer operand containing the value to be converted.
-/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
-///    converted value of the second operand. The upper 64 bits are copied from
-///    the upper 64 bits of the first operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtsi64_sd(__m128d __a, long long __b)
-{
-  __a[0] = __b;
-  return __a;
-}
-
-/// Converts the first (lower) element of a vector of [2 x double] into a
-///    64-bit signed integer value, according to the current rounding mode.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTSD2SI / CVTSD2SI </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower 64 bits are used in the
-///    conversion.
-/// \returns A 64-bit signed integer containing the converted value.
-static __inline__ long long __DEFAULT_FN_ATTRS
-_mm_cvtsd_si64(__m128d __a)
-{
-  return __builtin_ia32_cvtsd2si64((__v2df)__a);
-}
-
-/// Converts the first (lower) element of a vector of [2 x double] into a
-///    64-bit signed integer value, truncating the result when it is inexact.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTTSD2SI / CVTTSD2SI </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. The lower 64 bits are used in the
-///    conversion.
-/// \returns A 64-bit signed integer containing the converted value.
-static __inline__ long long __DEFAULT_FN_ATTRS
-_mm_cvttsd_si64(__m128d __a)
-{
-  return __builtin_ia32_cvttsd2si64((__v2df)__a);
-}
-#endif
-
-/// Converts a vector of [4 x i32] into a vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTDQ2PS / CVTDQ2PS </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \returns A 128-bit vector of [4 x float] containing the converted values.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cvtepi32_ps(__m128i __a)
-{
-  return (__m128)__builtin_convertvector((__v4si)__a, __v4sf);
-}
-
-/// Converts a vector of [4 x float] into a vector of [4 x i32].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTPS2DQ / CVTPS2DQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit integer vector of [4 x i32] containing the converted
-///    values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtps_epi32(__m128 __a)
-{
-  return (__m128i)__builtin_ia32_cvtps2dq((__v4sf)__a);
-}
-
-/// Converts a vector of [4 x float] into a vector of [4 x i32],
-///    truncating the result when it is inexact.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VCVTTPS2DQ / CVTTPS2DQ </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x float].
-/// \returns A 128-bit vector of [4 x i32] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvttps_epi32(__m128 __a)
-{
-  return (__m128i)__builtin_ia32_cvttps2dq((__v4sf)__a);
-}
-
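-/* Editorial usage sketch, not part of the original header: _mm_cvtps_epi32
-   rounds according to the current rounding mode (round-to-nearest-even by
-   default) while _mm_cvttps_epi32 always truncates toward zero, so 2.7f
-   converts to 3 with the former and 2 with the latter. The helper name is
-   illustrative only. */
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-__example_round_or_truncate(__m128 __v, int __truncate)
-{
-  return __truncate ? _mm_cvttps_epi32(__v) : _mm_cvtps_epi32(__v);
-}
-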
-/// Returns a vector of [4 x i32] where the lowest element is the input
-///    operand and the remaining elements are zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVD / MOVD </c> instruction.
-///
-/// \param __a
-///    A 32-bit signed integer operand.
-/// \returns A 128-bit vector of [4 x i32].
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtsi32_si128(int __a)
-{
-  return __extension__ (__m128i)(__v4si){ __a, 0, 0, 0 };
-}
-
-#ifdef __x86_64__
-/// Returns a vector of [2 x i64] where the lower element is the input
-///    operand and the upper element is zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
-///
-/// \param __a
-///    A 64-bit signed integer operand containing the value to be converted.
-/// \returns A 128-bit vector of [2 x i64] containing the converted value.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtsi64_si128(long long __a)
-{
-  return __extension__ (__m128i)(__v2di){ __a, 0 };
-}
-#endif
-
-/// Moves the least significant 32 bits of a vector of [4 x i32] to a
-///    32-bit signed integer value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVD / MOVD </c> instruction.
-///
-/// \param __a
-///    A vector of [4 x i32]. The least significant 32 bits are moved to the
-///    destination.
-/// \returns A 32-bit signed integer containing the moved value.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_cvtsi128_si32(__m128i __a)
-{
-  __v4si __b = (__v4si)__a;
-  return __b[0];
-}
-
-#ifdef __x86_64__
-/// Moves the least significant 64 bits of a vector of [2 x i64] to a
-///    64-bit signed integer value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
-///
-/// \param __a
-///    A vector of [2 x i64]. The least significant 64 bits are moved to the
-///    destination.
-/// \returns A 64-bit signed integer containing the moved value.
-static __inline__ long long __DEFAULT_FN_ATTRS
-_mm_cvtsi128_si64(__m128i __a)
-{
-  return __a[0];
-}
-#endif
-
-/// Moves packed integer values from an aligned 128-bit memory location
-///    to elements in a 128-bit integer vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVDQA / MOVDQA </c> instruction.
-///
-/// \param __p
-///    An aligned pointer to a memory location containing integer values.
-/// \returns A 128-bit integer vector containing the moved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_load_si128(__m128i const *__p)
-{
-  return *__p;
-}
-
-/// Moves packed integer values from an unaligned 128-bit memory location
-///    to elements in a 128-bit integer vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVDQU / MOVDQU </c> instruction.
-///
-/// \param __p
-///    A pointer to a memory location containing integer values.
-/// \returns A 128-bit integer vector containing the moved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadu_si128(__m128i_u const *__p)
-{
-  struct __loadu_si128 {
-    __m128i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((struct __loadu_si128*)__p)->__v;
-}
-
-/// Returns a vector of [2 x i64] where the lower element is taken from
-///    the lower element of the operand, and the upper element is zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
-///
-/// \param __p
-///    A 128-bit vector of [2 x i64]. Bits [63:0] are written to bits [63:0] of
-///    the destination.
-/// \returns A 128-bit vector of [2 x i64]. The lower order bits contain the
-///    moved value. The higher order bits are cleared.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadl_epi64(__m128i_u const *__p)
-{
-  struct __mm_loadl_epi64_struct {
-    long long __u;
-  } __attribute__((__packed__, __may_alias__));
-  return __extension__ (__m128i) { ((struct __mm_loadl_epi64_struct*)__p)->__u, 0};
-}
-
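-/* Editorial usage sketch, not part of the original header: _mm_load_si128
-   requires a 16-byte-aligned pointer, whereas _mm_loadu_si128 accepts any
-   address, so arbitrarily aligned buffers are read with the unaligned form.
-   The helper name is illustrative only. */
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-__example_load_first_block(const unsigned char *__bytes)
-{
-  /* __bytes may have any alignment, so use the unaligned load. */
-  return _mm_loadu_si128((const __m128i_u *)__bytes);
-}
-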
-/// Generates a 128-bit vector of [4 x i32] with unspecified content.
-///    This could be used as an argument to another intrinsic function where the
-///    argument is required but the value is not actually used.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \returns A 128-bit vector of [4 x i32] with unspecified content.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_undefined_si128(void)
-{
-  return (__m128i)__builtin_ia32_undef128();
-}
-
-/// Initializes both 64-bit values in a 128-bit vector of [2 x i64] with
-///    the specified 64-bit integer values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __q1
-///    A 64-bit integer value used to initialize the upper 64 bits of the
-///    destination vector of [2 x i64].
-/// \param __q0
-///    A 64-bit integer value used to initialize the lower 64 bits of the
-///    destination vector of [2 x i64].
-/// \returns An initialized 128-bit vector of [2 x i64] containing the values
-///    provided in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi64x(long long __q1, long long __q0)
-{
-  return __extension__ (__m128i)(__v2di){ __q0, __q1 };
-}
-
-/// Initializes both 64-bit values in a 128-bit vector of [2 x i64] with
-///    the specified 64-bit integer values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __q1
-///    A 64-bit integer value used to initialize the upper 64 bits of the
-///    destination vector of [2 x i64].
-/// \param __q0
-///    A 64-bit integer value used to initialize the lower 64 bits of the
-///    destination vector of [2 x i64].
-/// \returns An initialized 128-bit vector of [2 x i64] containing the values
-///    provided in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi64(__m64 __q1, __m64 __q0)
-{
-  return _mm_set_epi64x((long long)__q1, (long long)__q0);
-}
-
-/// Initializes the 32-bit values in a 128-bit vector of [4 x i32] with
-///    the specified 32-bit integer values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __i3
-///    A 32-bit integer value used to initialize bits [127:96] of the
-///    destination vector.
-/// \param __i2
-///    A 32-bit integer value used to initialize bits [95:64] of the destination
-///    vector.
-/// \param __i1
-///    A 32-bit integer value used to initialize bits [63:32] of the destination
-///    vector.
-/// \param __i0
-///    A 32-bit integer value used to initialize bits [31:0] of the destination
-///    vector.
-/// \returns An initialized 128-bit vector of [4 x i32] containing the values
-///    provided in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi32(int __i3, int __i2, int __i1, int __i0)
-{
-  return __extension__ (__m128i)(__v4si){ __i0, __i1, __i2, __i3};
-}
-
-/// Initializes the 16-bit values in a 128-bit vector of [8 x i16] with
-///    the specified 16-bit integer values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __w7
-///    A 16-bit integer value used to initialize bits [127:112] of the
-///    destination vector.
-/// \param __w6
-///    A 16-bit integer value used to initialize bits [111:96] of the
-///    destination vector.
-/// \param __w5
-///    A 16-bit integer value used to initialize bits [95:80] of the destination
-///    vector.
-/// \param __w4
-///    A 16-bit integer value used to initialize bits [79:64] of the destination
-///    vector.
-/// \param __w3
-///    A 16-bit integer value used to initialize bits [63:48] of the destination
-///    vector.
-/// \param __w2
-///    A 16-bit integer value used to initialize bits [47:32] of the destination
-///    vector.
-/// \param __w1
-///    A 16-bit integer value used to initialize bits [31:16] of the destination
-///    vector.
-/// \param __w0
-///    A 16-bit integer value used to initialize bits [15:0] of the destination
-///    vector.
-/// \returns An initialized 128-bit vector of [8 x i16] containing the values
-///    provided in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi16(short __w7, short __w6, short __w5, short __w4, short __w3, short __w2, short __w1, short __w0)
-{
-  return __extension__ (__m128i)(__v8hi){ __w0, __w1, __w2, __w3, __w4, __w5, __w6, __w7 };
-}
-
-/// Initializes the 8-bit values in a 128-bit vector of [16 x i8] with
-///    the specified 8-bit integer values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __b15
-///    Initializes bits [127:120] of the destination vector.
-/// \param __b14
-///    Initializes bits [119:112] of the destination vector.
-/// \param __b13
-///    Initializes bits [111:104] of the destination vector.
-/// \param __b12
-///    Initializes bits [103:96] of the destination vector.
-/// \param __b11
-///    Initializes bits [95:88] of the destination vector.
-/// \param __b10
-///    Initializes bits [87:80] of the destination vector.
-/// \param __b9
-///    Initializes bits [79:72] of the destination vector.
-/// \param __b8
-///    Initializes bits [71:64] of the destination vector.
-/// \param __b7
-///    Initializes bits [63:56] of the destination vector.
-/// \param __b6
-///    Initializes bits [55:48] of the destination vector.
-/// \param __b5
-///    Initializes bits [47:40] of the destination vector.
-/// \param __b4
-///    Initializes bits [39:32] of the destination vector.
-/// \param __b3
-///    Initializes bits [31:24] of the destination vector.
-/// \param __b2
-///    Initializes bits [23:16] of the destination vector.
-/// \param __b1
-///    Initializes bits [15:8] of the destination vector.
-/// \param __b0
-///    Initializes bits [7:0] of the destination vector.
-/// \returns An initialized 128-bit vector of [16 x i8] containing the values
-///    provided in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi8(char __b15, char __b14, char __b13, char __b12, char __b11, char __b10, char __b9, char __b8, char __b7, char __b6, char __b5, char __b4, char __b3, char __b2, char __b1, char __b0)
-{
-  return __extension__ (__m128i)(__v16qi){ __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7, __b8, __b9, __b10, __b11, __b12, __b13, __b14, __b15 };
-}
-
-/// Initializes both values in a 128-bit integer vector with the
-///    specified 64-bit integer value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __q
-///    Integer value used to initialize the elements of the destination integer
-///    vector.
-/// \returns An initialized 128-bit integer vector of [2 x i64] with both
-///    elements containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi64x(long long __q)
-{
-  return _mm_set_epi64x(__q, __q);
-}
-
-/// Initializes both values in a 128-bit vector of [2 x i64] with the
-///    specified 64-bit value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __q
-///    A 64-bit value used to initialize the elements of the destination integer
-///    vector.
-/// \returns An initialized 128-bit vector of [2 x i64] with all elements
-///    containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi64(__m64 __q)
-{
-  return _mm_set_epi64(__q, __q);
-}
-
-/// Initializes all values in a 128-bit vector of [4 x i32] with the
-///    specified 32-bit value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __i
-///    A 32-bit value used to initialize the elements of the destination integer
-///    vector.
-/// \returns An initialized 128-bit vector of [4 x i32] with all elements
-///    containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi32(int __i)
-{
-  return _mm_set_epi32(__i, __i, __i, __i);
-}
-
-/// Initializes all values in a 128-bit vector of [8 x i16] with the
-///    specified 16-bit value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __w
-///    A 16-bit value used to initialize the elements of the destination integer
-///    vector.
-/// \returns An initialized 128-bit vector of [8 x i16] with all elements
-///    containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi16(short __w)
-{
-  return _mm_set_epi16(__w, __w, __w, __w, __w, __w, __w, __w);
-}
-
-/// Initializes all values in a 128-bit vector of [16 x i8] with the
-///    specified 8-bit value.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __b
-///    An 8-bit value used to initialize the elements of the destination integer
-///    vector.
-/// \returns An initialized 128-bit vector of [16 x i8] with all elements
-///    containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi8(char __b)
-{
-  return _mm_set_epi8(__b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b);
-}
-
-/// Constructs a 128-bit integer vector, initialized in reverse order
-///     with the specified 64-bit integral values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic does not correspond to a specific instruction.
-///
-/// \param __q0
-///    A 64-bit integral value used to initialize the lower 64 bits of the
-///    result.
-/// \param __q1
-///    A 64-bit integral value used to initialize the upper 64 bits of the
-///    result.
-/// \returns An initialized 128-bit integer vector.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi64(__m64 __q0, __m64 __q1)
-{
-  return _mm_set_epi64(__q1, __q0);
-}
-
-/// Constructs a 128-bit integer vector, initialized in reverse order
-///     with the specified 32-bit integral values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __i0
-///    A 32-bit integral value used to initialize bits [31:0] of the result.
-/// \param __i1
-///    A 32-bit integral value used to initialize bits [63:32] of the result.
-/// \param __i2
-///    A 32-bit integral value used to initialize bits [95:64] of the result.
-/// \param __i3
-///    A 32-bit integral value used to initialize bits [127:96] of the result.
-/// \returns An initialized 128-bit integer vector.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi32(int __i0, int __i1, int __i2, int __i3)
-{
-  return _mm_set_epi32(__i3, __i2, __i1, __i0);
-}
-
-/// Constructs a 128-bit integer vector, initialized in reverse order
-///     with the specified 16-bit integral values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __w0
-///    A 16-bit integral value used to initialize bits [15:0] of the result.
-/// \param __w1
-///    A 16-bit integral value used to initialize bits [31:16] of the result.
-/// \param __w2
-///    A 16-bit integral value used to initialize bits [47:32] of the result.
-/// \param __w3
-///    A 16-bit integral value used to initialize bits [63:48] of the result.
-/// \param __w4
-///    A 16-bit integral value used to initialize bits [79:64] of the result.
-/// \param __w5
-///    A 16-bit integral value used to initialize bits [95:80] of the result.
-/// \param __w6
-///    A 16-bit integral value used to initialize bits [111:96] of the result.
-/// \param __w7
-///    A 16-bit integral value used to initialize bits [127:112] of the result.
-/// \returns An initialized 128-bit integer vector.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi16(short __w0, short __w1, short __w2, short __w3, short __w4, short __w5, short __w6, short __w7)
-{
-  return _mm_set_epi16(__w7, __w6, __w5, __w4, __w3, __w2, __w1, __w0);
-}
-
-/// Constructs a 128-bit integer vector, initialized in reverse order
-///     with the specified 8-bit integral values.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic is a utility function and does not correspond to a specific
-///    instruction.
-///
-/// \param __b0
-///    An 8-bit integral value used to initialize bits [7:0] of the result.
-/// \param __b1
-///    An 8-bit integral value used to initialize bits [15:8] of the result.
-/// \param __b2
-///    An 8-bit integral value used to initialize bits [23:16] of the result.
-/// \param __b3
-///    An 8-bit integral value used to initialize bits [31:24] of the result.
-/// \param __b4
-///    An 8-bit integral value used to initialize bits [39:32] of the result.
-/// \param __b5
-///    An 8-bit integral value used to initialize bits [47:40] of the result.
-/// \param __b6
-///    An 8-bit integral value used to initialize bits [55:48] of the result.
-/// \param __b7
-///    An 8-bit integral value used to initialize bits [63:56] of the result.
-/// \param __b8
-///    An 8-bit integral value used to initialize bits [71:64] of the result.
-/// \param __b9
-///    An 8-bit integral value used to initialize bits [79:72] of the result.
-/// \param __b10
-///    An 8-bit integral value used to initialize bits [87:80] of the result.
-/// \param __b11
-///    An 8-bit integral value used to initialize bits [95:88] of the result.
-/// \param __b12
-///    An 8-bit integral value used to initialize bits [103:96] of the result.
-/// \param __b13
-///    An 8-bit integral value used to initialize bits [111:104] of the result.
-/// \param __b14
-///    An 8-bit integral value used to initialize bits [119:112] of the result.
-/// \param __b15
-///    An 8-bit integral value used to initialize bits [127:120] of the result.
-/// \returns An initialized 128-bit integer vector.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5, char __b6, char __b7, char __b8, char __b9, char __b10, char __b11, char __b12, char __b13, char __b14, char __b15)
-{
-  return _mm_set_epi8(__b15, __b14, __b13, __b12, __b11, __b10, __b9, __b8, __b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
-}
-
-/// Creates a 128-bit integer vector initialized to zero.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VXORPS / XORPS </c> instruction.
-///
-/// \returns An initialized 128-bit integer vector with all elements set to
-///    zero.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setzero_si128(void)
-{
-  return __extension__ (__m128i)(__v2di){ 0LL, 0LL };
-}
-
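-/* Editorial usage sketch, not part of the original header: _mm_set_epi32
-   takes its arguments from the highest element down to the lowest, while
-   _mm_setr_epi32 takes them in memory (low-to-high) order, so the two calls
-   below build the same vector. The helper name is illustrative only. */
-static __inline__ int __DEFAULT_FN_ATTRS
-__example_set_orders_agree(void)
-{
-  __m128i __x = _mm_set_epi32(4, 3, 2, 1);  /* element 0 = 1, element 3 = 4 */
-  __m128i __y = _mm_setr_epi32(1, 2, 3, 4); /* same layout, reversed order  */
-  __m128i __eq = _mm_cmpeq_epi32(__x, __y);
-  return _mm_cvtsi128_si32(__eq) == -1;     /* lowest lane is all ones      */
-}
-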
-/// Stores a 128-bit integer vector to a memory location aligned on a
-///    128-bit boundary.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVAPS / MOVAPS </c> instruction.
-///
-/// \param __p
-///    A pointer to an aligned memory location that will receive the integer
-///    values.
-/// \param __b
-///    A 128-bit integer vector containing the values to be moved.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_si128(__m128i *__p, __m128i __b)
-{
-  *__p = __b;
-}
-
-/// Stores a 128-bit integer vector to an unaligned memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVUPS / MOVUPS </c> instruction.
-///
-/// \param __p
-///    A pointer to a memory location that will receive the integer values.
-/// \param __b
-///    A 128-bit integer vector containing the values to be moved.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si128(__m128i_u *__p, __m128i __b)
-{
-  struct __storeu_si128 {
-    __m128i_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_si128*)__p)->__v = __b;
-}
-
-/// Stores a 64-bit integer value from the low element of a 128-bit integer
-///    vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
-///
-/// \param __p
-///    A pointer to a 64-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \param __b
-///    A 128-bit integer vector containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si64(void *__p, __m128i __b)
-{
-  struct __storeu_si64 {
-    long long __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_si64*)__p)->__v = ((__v2di)__b)[0];
-}
-
-/// Stores a 32-bit integer value from the low element of a 128-bit integer
-///    vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVD / MOVD </c> instruction.
-///
-/// \param __p
-///    A pointer to a 32-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \param __b
-///    A 128-bit integer vector containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si32(void *__p, __m128i __b)
-{
-  struct __storeu_si32 {
-    int __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_si32*)__p)->__v = ((__v4si)__b)[0];
-}
-
-/// Stores a 16-bit integer value from the low element of a 128-bit integer
-///    vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic does not correspond to a specific instruction.
-///
-/// \param __p
-///    A pointer to a 16-bit memory location. The address of the memory
-///    location does not have to be aligned.
-/// \param __b
-///    A 128-bit integer vector containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si16(void *__p, __m128i __b)
-{
-  struct __storeu_si16 {
-    short __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_si16*)__p)->__v = ((__v8hi)__b)[0];
-}
-
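-/* Editorial usage sketch, not part of the original header: the narrow
-   unaligned stores above write only the low part of a vector, which is
-   useful for storing a partial result without touching the rest of the
-   destination buffer. The helper name is illustrative only. */
-static __inline__ void __DEFAULT_FN_ATTRS
-__example_store_low_lane(void *__dst, __m128i __v)
-{
-  _mm_storeu_si32(__dst, __v); /* writes only bytes 0..3 of __v */
-}
-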
-/// Moves bytes selected by the mask from the first operand to the
-///    specified unaligned memory location. When a mask bit is 1, the
-///    corresponding byte is written, otherwise it is not written.
-///
-///    To minimize caching, the data is flagged as non-temporal (unlikely to be
-///    used again soon). Exception and trap behavior for elements not selected
-///    for storage to memory are implementation dependent.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMASKMOVDQU / MASKMOVDQU </c>
-///   instruction.
-///
-/// \param __d
-///    A 128-bit integer vector containing the values to be moved.
-/// \param __n
-///    A 128-bit integer vector containing the mask. The most significant bit of
-///    each byte represents the mask bits.
-/// \param __p
-///    A pointer to an unaligned 128-bit memory location where the specified
-///    values are moved.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_maskmoveu_si128(__m128i __d, __m128i __n, char *__p)
-{
-  __builtin_ia32_maskmovdqu((__v16qi)__d, (__v16qi)__n, __p);
-}
-
-/// Stores the lower 64 bits of a 128-bit integer vector of [2 x i64] to
-///    a memory location.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVLPS / MOVLPS </c> instruction.
-///
-/// \param __p
-///    A pointer to a 64-bit memory location that will receive the lower 64 bits
-///    of the integer vector parameter.
-/// \param __a
-///    A 128-bit integer vector of [2 x i64]. The lower 64 bits contain the
-///    value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storel_epi64(__m128i_u *__p, __m128i __a)
-{
-  struct __mm_storel_epi64_struct {
-    long long __u;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __mm_storel_epi64_struct*)__p)->__u = __a[0];
-}
-
-/// Stores a 128-bit floating point vector of [2 x double] to a 128-bit
-///    aligned memory location.
-///
-///    To minimize caching, the data is flagged as non-temporal (unlikely to be
-///    used again soon).
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVNTPD / MOVNTPD </c> instruction.
-///
-/// \param __p
-///    A pointer to the 128-bit aligned memory location used to store the value.
-/// \param __a
-///    A vector of [2 x double] containing the 64-bit values to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_stream_pd(double *__p, __m128d __a)
-{
-  __builtin_nontemporal_store((__v2df)__a, (__v2df*)__p);
-}
-
-/// Stores a 128-bit integer vector to a 128-bit aligned memory location.
-///
-///    To minimize caching, the data is flagged as non-temporal (unlikely to be
-///    used again soon).
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVNTDQ / MOVNTDQ </c> instruction.
-///
-/// \param __p
-///    A pointer to the 128-bit aligned memory location used to store the value.
-/// \param __a
-///    A 128-bit integer vector containing the values to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_stream_si128(__m128i *__p, __m128i __a)
-{
-  __builtin_nontemporal_store((__v2di)__a, (__v2di*)__p);
-}
-
-/// Stores a 32-bit integer value in the specified memory location.
-///
-///    To minimize caching, the data is flagged as non-temporal (unlikely to be
-///    used again soon).
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> MOVNTI </c> instruction.
-///
-/// \param __p
-///    A pointer to the 32-bit memory location used to store the value.
-/// \param __a
-///    A 32-bit integer containing the value to be stored.
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
-_mm_stream_si32(int *__p, int __a)
-{
-  __builtin_ia32_movnti(__p, __a);
-}
-
-#ifdef __x86_64__
-/// Stores a 64-bit integer value in the specified memory location.
-///
-///    To minimize caching, the data is flagged as non-temporal (unlikely to be
-///    used again soon).
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> MOVNTIQ </c> instruction.
-///
-/// \param __p
-///    A pointer to the 64-bit memory location used to store the value.
-/// \param __a
-///    A 64-bit integer containing the value to be stored.
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
-_mm_stream_si64(long long *__p, long long __a)
-{
-  __builtin_ia32_movnti64(__p, __a);
-}
-#endif
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-/// The cache line containing \a __p is flushed and invalidated from all
-///    caches in the coherency domain.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CLFLUSH </c> instruction.
-///
-/// \param __p
-///    A pointer to the memory location used to identify the cache line to be
-///    flushed.
-void _mm_clflush(void const * __p);
-
-/// Forces strong memory ordering (serialization) between load
-///    instructions preceding this instruction and load instructions following
-///    this instruction, ensuring the system completes all previous loads before
-///    executing subsequent loads.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> LFENCE </c> instruction.
-///
-void _mm_lfence(void);
-
-/// Forces strong memory ordering (serialization) between load and store
-///    instructions preceding this instruction and load and store instructions
-///    following this instruction, ensuring that the system completes all
-///    previous memory accesses before executing subsequent memory accesses.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> MFENCE </c> instruction.
-///
-void _mm_mfence(void);
-
-#if defined(__cplusplus)
-} // extern "C"
-#endif
-
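-/* Editorial usage sketch, not part of the original header: non-temporal
-   stores such as _mm_stream_si128 are weakly ordered, so code that hands a
-   buffer to another agent typically issues a fence afterwards; _mm_mfence is
-   used here because it is declared above. The helper name is illustrative
-   only. */
-static __inline__ void __DEFAULT_FN_ATTRS
-__example_stream_and_publish(__m128i *__dst, __m128i __v)
-{
-  _mm_stream_si128(__dst, __v); /* bypasses the cache; __dst must be aligned */
-  _mm_mfence();                 /* order the store before later accesses     */
-}
-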
-/// Converts 16-bit signed integers from both 128-bit integer vector
-///    operands into 8-bit signed integers, and packs the results into the
-///    destination. Positive values greater than 0x7F are saturated to 0x7F.
-///    Negative values less than 0x80 are saturated to 0x80.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPACKSSWB / PACKSSWB </c> instruction.
-///
-/// \param __a
-///   A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as
-///   a signed integer and is converted to an 8-bit signed integer with
-///   saturation. Values greater than 0x7F are saturated to 0x7F. Values less
-///   than 0x80 are saturated to 0x80. The converted [8 x i8] values are
-///   written to the lower 64 bits of the result.
-/// \param __b
-///   A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as
-///   a signed integer and is converted to an 8-bit signed integer with
-///   saturation. Values greater than 0x7F are saturated to 0x7F. Values less
-///   than 0x80 are saturated to 0x80. The converted [8 x i8] values are
-///   written to the higher 64 bits of the result.
-/// \returns A 128-bit vector of [16 x i8] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_packs_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_packsswb128((__v8hi)__a, (__v8hi)__b);
-}
-
-/// Converts 32-bit signed integers from both 128-bit integer vector
-///    operands into 16-bit signed integers, and packs the results into the
-///    destination. Positive values greater than 0x7FFF are saturated to 0x7FFF.
-///    Negative values less than 0x8000 are saturated to 0x8000.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPACKSSDW / PACKSSDW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector of [4 x i32]. Each 32-bit element is treated as
-///    a signed integer and is converted to a 16-bit signed integer with
-///    saturation. Values greater than 0x7FFF are saturated to 0x7FFF. Values
-///    less than 0x8000 are saturated to 0x8000. The converted [4 x i16] values
-///    are written to the lower 64 bits of the result.
-/// \param __b
-///    A 128-bit integer vector of [4 x i32]. Each 32-bit element is treated as
-///    a signed integer and is converted to a 16-bit signed integer with
-///    saturation. Values greater than 0x7FFF are saturated to 0x7FFF. Values
-///    less than 0x8000 are saturated to 0x8000. The converted [4 x i16] values
-///    are written to the higher 64 bits of the result.
-/// \returns A 128-bit vector of [8 x i16] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_packs_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_packssdw128((__v4si)__a, (__v4si)__b);
-}
-
-/// Converts 16-bit signed integers from both 128-bit integer vector
-///    operands into 8-bit unsigned integers, and packs the results into the
-///    destination. Values greater than 0xFF are saturated to 0xFF. Values less
-///    than 0x00 are saturated to 0x00.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPACKUSWB / PACKUSWB </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as
-///    a signed integer and is converted to an 8-bit unsigned integer with
-///    saturation. Values greater than 0xFF are saturated to 0xFF. Values less
-///    than 0x00 are saturated to 0x00. The converted [8 x i8] values are
-///    written to the lower 64 bits of the result.
-/// \param __b
-///    A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as
-///    a signed integer and is converted to an 8-bit unsigned integer with
-///    saturation. Values greater than 0xFF are saturated to 0xFF. Values less
-///    than 0x00 are saturated to 0x00. The converted [8 x i8] values are
-///    written to the higher 64 bits of the result.
-/// \returns A 128-bit vector of [16 x i8] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_packus_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_ia32_packuswb128((__v8hi)__a, (__v8hi)__b);
-}
-
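-/* Editorial usage sketch, not part of the original header: _mm_packs_epi32
-   followed by _mm_packus_epi16 is a common SSE2 way to narrow eight 32-bit
-   values to unsigned bytes with saturation, e.g. when converting widened
-   pixel arithmetic back to 8-bit channels. The helper name is illustrative
-   only. */
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-__example_pack_to_u8(__m128i __lo4, __m128i __hi4)
-{
-  __m128i __words = _mm_packs_epi32(__lo4, __hi4); /* 8 x i16, signed saturation  */
-  return _mm_packus_epi16(__words, __words);       /* low 8 bytes hold the result */
-}
-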
-/// Extracts 16 bits from a 128-bit integer vector of [8 x i16], using
-///    the immediate-value parameter as a selector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPEXTRW / PEXTRW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \param __imm
-///    An immediate value. Bits [2:0] select values from \a __a to be assigned
-///    to bits [15:0] of the result. \n
-///    000: assign values from bits [15:0] of \a __a. \n
-///    001: assign values from bits [31:16] of \a __a. \n
-///    010: assign values from bits [47:32] of \a __a. \n
-///    011: assign values from bits [63:48] of \a __a. \n
-///    100: assign values from bits [79:64] of \a __a. \n
-///    101: assign values from bits [95:80] of \a __a. \n
-///    110: assign values from bits [111:96] of \a __a. \n
-///    111: assign values from bits [127:112] of \a __a.
-/// \returns An integer, whose lower 16 bits are selected from the 128-bit
-///    integer vector parameter and the remaining bits are assigned zeros.
-#define _mm_extract_epi16(a, imm) \
-  (int)(unsigned short)__builtin_ia32_vec_ext_v8hi((__v8hi)(__m128i)(a), \
-                                                   (int)(imm))
-
-/// Constructs a 128-bit integer vector by first making a copy of the
-///    128-bit integer vector parameter, and then inserting the lower 16 bits
-///    of an integer parameter into an offset specified by the immediate-value
-///    parameter.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPINSRW / PINSRW </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector of [8 x i16]. This vector is copied to the
-///    result and then one of the eight elements in the result is replaced by
-///    the lower 16 bits of \a __b.
-/// \param __b
-///    An integer. The lower 16 bits of this parameter are written to the
-///    result beginning at an offset specified by \a __imm.
-/// \param __imm
-///    An immediate value specifying the bit offset in the result at which the
-///    lower 16 bits of \a __b are written.
-/// \returns A 128-bit integer vector containing the constructed values.
-#define _mm_insert_epi16(a, b, imm) \
-  (__m128i)__builtin_ia32_vec_set_v8hi((__v8hi)(__m128i)(a), (int)(b), \
-                                       (int)(imm))
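/* Usage sketch (illustrative only; __example_swap_word3 is a hypothetical
 * helper, not part of this header): read element 3 of an [8 x i16] vector and
 * replace it in place. The element selector must be a compile-time constant. */
static __inline__ int __DEFAULT_FN_ATTRS
__example_swap_word3(__m128i *__v, short __w)
{
  int __old = _mm_extract_epi16(*__v, 3);  /* bits [63:48], zero-extended */
  *__v = _mm_insert_epi16(*__v, __w, 3);   /* write __w into bits [63:48] */
  return __old;
}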
-
-/// Copies the values of the most significant bits from each 8-bit
-///    element in a 128-bit integer vector of [16 x i8] to create a 16-bit mask
-///    value, zero-extends the value, and writes it to the destination.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPMOVMSKB / PMOVMSKB </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector containing the values with bits to be extracted.
-/// \returns The most significant bits from each 8-bit element in \a __a,
-///    written to bits [15:0]. The other bits are assigned zeros.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_movemask_epi8(__m128i __a)
-{
-  return __builtin_ia32_pmovmskb128((__v16qi)__a);
-}
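/* Usage sketch (illustrative only; __example_has_zero_byte is a hypothetical
 * helper, not part of this header): detect a zero byte by comparing against
 * zero and collapsing the per-byte sign bits into a 16-bit mask. */
static __inline__ int __DEFAULT_FN_ATTRS
__example_has_zero_byte(__m128i __v)
{
  __m128i __eq = _mm_cmpeq_epi8(__v, _mm_setzero_si128()); /* 0xFF where byte == 0 */
  return _mm_movemask_epi8(__eq) != 0;                     /* any bit set in [15:0] */
}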
-
-/// Constructs a 128-bit integer vector by shuffling four 32-bit
-///    elements of a 128-bit integer vector parameter, using the immediate-value
-///    parameter as a specifier.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128i _mm_shuffle_epi32(__m128i a, const int imm);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPSHUFD / PSHUFD </c> instruction.
-///
-/// \param a
-///    A 128-bit integer vector containing the values to be copied.
-/// \param imm
-///    An 8-bit immediate value specifying which elements to copy from \a a. The
-///    destinations within the 128-bit destination are assigned
-///    values as follows: \n
-///    Bits [1:0] are used to assign values to bits [31:0] of the result. \n
-///    Bits [3:2] are used to assign values to bits [63:32] of the result. \n
-///    Bits [5:4] are used to assign values to bits [95:64] of the result. \n
-///    Bits [7:6] are used to assign values to bits [127:96] of the result. \n
-///    Bit value assignments: \n
-///    00: assign values from bits [31:0] of \a a. \n
-///    01: assign values from bits [63:32] of \a a. \n
-///    10: assign values from bits [95:64] of \a a. \n
-///    11: assign values from bits [127:96] of \a a.
-/// \returns A 128-bit integer vector containing the shuffled values.
-#define _mm_shuffle_epi32(a, imm) \
-  (__m128i)__builtin_ia32_pshufd((__v4si)(__m128i)(a), (int)(imm))
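/* Usage sketch (illustrative only; __example_reverse_epi32 is a hypothetical
 * helper, not part of this header): immediate 0x1B (0b00011011) selects source
 * elements 3,2,1,0, reversing the four 32-bit elements. */
static __inline__ __m128i __DEFAULT_FN_ATTRS
__example_reverse_epi32(__m128i __v)
{
  return _mm_shuffle_epi32(__v, 0x1B); /* result[0]=v[3], [1]=v[2], [2]=v[1], [3]=v[0] */
}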
-
-/// Constructs a 128-bit integer vector by shuffling four lower 16-bit
-///    elements of a 128-bit integer vector of [8 x i16], using the immediate
-///    value parameter as a specifier.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128i _mm_shufflelo_epi16(__m128i a, const int imm);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPSHUFLW / PSHUFLW </c> instruction.
-///
-/// \param a
-///    A 128-bit integer vector of [8 x i16]. Bits [127:64] are copied to bits
-///    [127:64] of the result.
-/// \param imm
-///    An 8-bit immediate value specifying which elements to copy from \a a. \n
-///    Bits[1:0] are used to assign values to bits [15:0] of the result. \n
-///    Bits[3:2] are used to assign values to bits [31:16] of the result. \n
-///    Bits[5:4] are used to assign values to bits [47:32] of the result. \n
-///    Bits[7:6] are used to assign values to bits [63:48] of the result. \n
-///    Bit value assignments: \n
-///    00: assign values from bits [15:0] of \a a. \n
-///    01: assign values from bits [31:16] of \a a. \n
-///    10: assign values from bits [47:32] of \a a. \n
-///    11: assign values from bits [63:48] of \a a. \n
-/// \returns A 128-bit integer vector containing the shuffled values.
-#define _mm_shufflelo_epi16(a, imm) \
-  (__m128i)__builtin_ia32_pshuflw((__v8hi)(__m128i)(a), (int)(imm))
-
-/// Constructs a 128-bit integer vector by shuffling four upper 16-bit
-///    elements of a 128-bit integer vector of [8 x i16], using the immediate
-///    value parameter as a specifier.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128i _mm_shufflehi_epi16(__m128i a, const int imm);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VPSHUFHW / PSHUFHW </c> instruction.
-///
-/// \param a
-///    A 128-bit integer vector of [8 x i16]. Bits [63:0] are copied to bits
-///    [63:0] of the result.
-/// \param imm
-///    An 8-bit immediate value specifying which elements to copy from \a a. \n
-///    Bits[1:0] are used to assign values to bits [79:64] of the result. \n
-///    Bits[3:2] are used to assign values to bits [95:80] of the result. \n
-///    Bits[5:4] are used to assign values to bits [111:96] of the result. \n
-///    Bits[7:6] are used to assign values to bits [127:112] of the result. \n
-///    Bit value assignments: \n
-///    00: assign values from bits [79:64] of \a a. \n
-///    01: assign values from bits [95:80] of \a a. \n
-///    10: assign values from bits [111:96] of \a a. \n
-///    11: assign values from bits [127:112] of \a a. \n
-/// \returns A 128-bit integer vector containing the shuffled values.
-#define _mm_shufflehi_epi16(a, imm) \
-  (__m128i)__builtin_ia32_pshufhw((__v8hi)(__m128i)(a), (int)(imm))
-
-/// Unpacks the high-order (index 8-15) values from two 128-bit vectors
-///    of [16 x i8] and interleaves them into a 128-bit vector of [16 x i8].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPUNPCKHBW / PUNPCKHBW </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [16 x i8].
-///    Bits [71:64] are written to bits [7:0] of the result. \n
-///    Bits [79:72] are written to bits [23:16] of the result. \n
-///    Bits [87:80] are written to bits [39:32] of the result. \n
-///    Bits [95:88] are written to bits [55:48] of the result. \n
-///    Bits [103:96] are written to bits [71:64] of the result. \n
-///    Bits [111:104] are written to bits [87:80] of the result. \n
-///    Bits [119:112] are written to bits [103:96] of the result. \n
-///    Bits [127:120] are written to bits [119:112] of the result.
-/// \param __b
-///    A 128-bit vector of [16 x i8]. \n
-///    Bits [71:64] are written to bits [15:8] of the result. \n
-///    Bits [79:72] are written to bits [31:24] of the result. \n
-///    Bits [87:80] are written to bits [47:40] of the result. \n
-///    Bits [95:88] are written to bits [63:56] of the result. \n
-///    Bits [103:96] are written to bits [79:72] of the result. \n
-///    Bits [111:104] are written to bits [95:88] of the result. \n
-///    Bits [119:112] are written to bits [111:104] of the result. \n
-///    Bits [127:120] are written to bits [127:120] of the result.
-/// \returns A 128-bit vector of [16 x i8] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpackhi_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v16qi)__a, (__v16qi)__b, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15);
-}
-
-/// Unpacks the high-order (index 4-7) values from two 128-bit vectors of
-///    [8 x i16] and interleaves them into a 128-bit vector of [8 x i16].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPUNPCKHWD / PUNPCKHWD </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [8 x i16].
-///    Bits [79:64] are written to bits [15:0] of the result. \n
-///    Bits [95:80] are written to bits [47:32] of the result. \n
-///    Bits [111:96] are written to bits [79:64] of the result. \n
-///    Bits [127:112] are written to bits [111:96] of the result.
-/// \param __b
-///    A 128-bit vector of [8 x i16].
-///    Bits [79:64] are written to bits [31:16] of the result. \n
-///    Bits [95:80] are written to bits [63:48] of the result. \n
-///    Bits [111:96] are written to bits [95:80] of the result. \n
-///    Bits [127:112] are written to bits [127:112] of the result.
-/// \returns A 128-bit vector of [8 x i16] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpackhi_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7);
-}
-
-/// Unpacks the high-order (index 2,3) values from two 128-bit vectors of
-///    [4 x i32] and interleaves them into a 128-bit vector of [4 x i32].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPUNPCKHDQ / PUNPCKHDQ </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x i32]. \n
-///    Bits [95:64] are written to bits [31:0] of the destination. \n
-///    Bits [127:96] are written to bits [95:64] of the destination.
-/// \param __b
-///    A 128-bit vector of [4 x i32]. \n
-///    Bits [95:64] are written to bits [63:32] of the destination. \n
-///    Bits [127:96] are written to bits [127:96] of the destination.
-/// \returns A 128-bit vector of [4 x i32] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpackhi_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 2, 4+2, 3, 4+3);
-}
-
-/// Unpacks the high-order 64-bit elements from two 128-bit vectors of
-///    [2 x i64] and interleaves them into a 128-bit vector of [2 x i64].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPUNPCKHQDQ / PUNPCKHQDQ </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x i64]. \n
-///    Bits [127:64] are written to bits [63:0] of the destination.
-/// \param __b
-///    A 128-bit vector of [2 x i64]. \n
-///    Bits [127:64] are written to bits [127:64] of the destination.
-/// \returns A 128-bit vector of [2 x i64] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpackhi_epi64(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 1, 2+1);
-}
-
-/// Unpacks the low-order (index 0-7) values from two 128-bit vectors of
-///    [16 x i8] and interleaves them into a 128-bit vector of [16 x i8].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPUNPCKLBW / PUNPCKLBW </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [16 x i8]. \n
-///    Bits [7:0] are written to bits [7:0] of the result. \n
-///    Bits [15:8] are written to bits [23:16] of the result. \n
-///    Bits [23:16] are written to bits [39:32] of the result. \n
-///    Bits [31:24] are written to bits [55:48] of the result. \n
-///    Bits [39:32] are written to bits [71:64] of the result. \n
-///    Bits [47:40] are written to bits [87:80] of the result. \n
-///    Bits [55:48] are written to bits [103:96] of the result. \n
-///    Bits [63:56] are written to bits [119:112] of the result.
-/// \param __b
-///    A 128-bit vector of [16 x i8].
-///    Bits [7:0] are written to bits [15:8] of the result. \n
-///    Bits [15:8] are written to bits [31:24] of the result. \n
-///    Bits [23:16] are written to bits [47:40] of the result. \n
-///    Bits [31:24] are written to bits [63:56] of the result. \n
-///    Bits [39:32] are written to bits [79:72] of the result. \n
-///    Bits [47:40] are written to bits [95:88] of the result. \n
-///    Bits [55:48] are written to bits [111:104] of the result. \n
-///    Bits [63:56] are written to bits [127:120] of the result.
-/// \returns A 128-bit vector of [16 x i8] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpacklo_epi8(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v16qi)__a, (__v16qi)__b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7);
-}
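/* Usage sketch (illustrative only; __example_widen_lo_u8 is a hypothetical
 * helper, not part of this header): interleaving with a zero vector
 * zero-extends the low eight unsigned bytes to 16-bit lanes. */
static __inline__ __m128i __DEFAULT_FN_ATTRS
__example_widen_lo_u8(__m128i __v)
{
  return _mm_unpacklo_epi8(__v, _mm_setzero_si128()); /* byte i -> low half of word i */
}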
-
-/// Unpacks the low-order (index 0-3) values from each of the two 128-bit
-///    vectors of [8 x i16] and interleaves them into a 128-bit vector of
-///    [8 x i16].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPUNPCKLWD / PUNPCKLWD </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [8 x i16].
-///    Bits [15:0] are written to bits [15:0] of the result. \n
-///    Bits [31:16] are written to bits [47:32] of the result. \n
-///    Bits [47:32] are written to bits [79:64] of the result. \n
-///    Bits [63:48] are written to bits [111:96] of the result.
-/// \param __b
-///    A 128-bit vector of [8 x i16].
-///    Bits [15:0] are written to bits [31:16] of the result. \n
-///    Bits [31:16] are written to bits [63:48] of the result. \n
-///    Bits [47:32] are written to bits [95:80] of the result. \n
-///    Bits [63:48] are written to bits [127:112] of the result.
-/// \returns A 128-bit vector of [8 x i16] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpacklo_epi16(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3);
-}
-
-/// Unpacks the low-order (index 0,1) values from two 128-bit vectors of
-///    [4 x i32] and interleaves them into a 128-bit vector of [4 x i32].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPUNPCKLDQ / PUNPCKLDQ </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [4 x i32]. \n
-///    Bits [31:0] are written to bits [31:0] of the destination. \n
-///    Bits [63:32] are written to bits [95:64] of the destination.
-/// \param __b
-///    A 128-bit vector of [4 x i32]. \n
-///    Bits [31:0] are written to bits [63:32] of the destination. \n
-///    Bits [63:32] are written to bits [127:96] of the destination.
-/// \returns A 128-bit vector of [4 x i32] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpacklo_epi32(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 0, 4+0, 1, 4+1);
-}
-
-/// Unpacks the low-order 64-bit elements from two 128-bit vectors of
-///    [2 x i64] and interleaves them into a 128-bit vector of [2 x i64].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VPUNPCKLQDQ / PUNPCKLQDQ </c>
-///   instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x i64]. \n
-///    Bits [63:0] are written to bits [63:0] of the destination. \n
-/// \param __b
-///    A 128-bit vector of [2 x i64]. \n
-///    Bits [63:0] are written to bits [127:64] of the destination. \n
-/// \returns A 128-bit vector of [2 x i64] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpacklo_epi64(__m128i __a, __m128i __b)
-{
-  return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 0, 2+0);
-}
-
-/// Returns the lower 64 bits of a 128-bit integer vector as a 64-bit
-///    integer.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> MOVDQ2Q </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector operand. The lower 64 bits are moved to the
-///    destination.
-/// \returns A 64-bit integer containing the lower 64 bits of the parameter.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_mm_movepi64_pi64(__m128i __a)
-{
-  return (__m64)__a[0];
-}
-
-/// Moves the 64-bit operand to a 128-bit integer vector, zeroing the
-///    upper bits.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> MOVD+VMOVQ </c> instruction.
-///
-/// \param __a
-///    A 64-bit value.
-/// \returns A 128-bit integer vector. The lower 64 bits contain the value from
-///    the operand. The upper 64 bits are assigned zeros.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_movpi64_epi64(__m64 __a)
-{
-  return __extension__ (__m128i)(__v2di){ (long long)__a, 0 };
-}
-
-/// Moves the lower 64 bits of a 128-bit integer vector to a 128-bit
-///    integer vector, zeroing the upper bits.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
-///
-/// \param __a
-///    A 128-bit integer vector operand. The lower 64 bits are moved to the
-///    destination.
-/// \returns A 128-bit integer vector. The lower 64 bits contain the value from
-///    the operand. The upper 64 bits are assigned zeros.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_move_epi64(__m128i __a)
-{
-  return __builtin_shufflevector((__v2di)__a, _mm_setzero_si128(), 0, 2);
-}
-
-/// Unpacks the high-order 64-bit elements from two 128-bit vectors of
-///    [2 x double] and interleaves them into a 128-bit vector of [2 x
-///    double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUNPCKHPD / UNPCKHPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. \n
-///    Bits [127:64] are written to bits [63:0] of the destination.
-/// \param __b
-///    A 128-bit vector of [2 x double]. \n
-///    Bits [127:64] are written to bits [127:64] of the destination.
-/// \returns A 128-bit vector of [2 x double] containing the interleaved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_unpackhi_pd(__m128d __a, __m128d __b)
-{
-  return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 1, 2+1);
-}
-
-/// Unpacks the low-order 64-bit elements from two 128-bit vectors
-///    of [2 x double] and interleaves them into a 128-bit vector of [2 x
-///    double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VUNPCKLPD / UNPCKLPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double]. \n
-///    Bits [63:0] are written to bits [63:0] of the destination.
-/// \param __b
-///    A 128-bit vector of [2 x double]. \n
-///    Bits [63:0] are written to bits [127:64] of the destination.
-/// \returns A 128-bit vector of [2 x double] containing the interleaved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_unpacklo_pd(__m128d __a, __m128d __b)
-{
-  return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 0, 2+0);
-}
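/* Usage sketch (illustrative only; __example_transpose_2x2_pd is a hypothetical
 * helper, not part of this header): the low/high unpack pair transposes a 2x2
 * matrix of doubles held as two row vectors. */
static __inline__ void __DEFAULT_FN_ATTRS
__example_transpose_2x2_pd(__m128d *__row0, __m128d *__row1)
{
  __m128d __col0 = _mm_unpacklo_pd(*__row0, *__row1); /* { row0[0], row1[0] } */
  __m128d __col1 = _mm_unpackhi_pd(*__row0, *__row1); /* { row0[1], row1[1] } */
  *__row0 = __col0;
  *__row1 = __col1;
}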
-
-/// Extracts the sign bits of the double-precision values in the 128-bit
-///    vector of [2 x double], zero-extends the value, and writes it to the
-///    low-order bits of the destination.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VMOVMSKPD / MOVMSKPD </c> instruction.
-///
-/// \param __a
-///    A 128-bit vector of [2 x double] containing the values with sign bits to
-///    be extracted.
-/// \returns The sign bits from each of the double-precision elements in \a __a,
-///    written to bits [1:0]. The remaining bits are assigned values of zero.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_movemask_pd(__m128d __a)
-{
-  return __builtin_ia32_movmskpd((__v2df)__a);
-}
-
-
-/// Constructs a 128-bit floating-point vector of [2 x double] from two
-///    128-bit vector parameters of [2 x double], using the immediate-value
-///     parameter as a specifier.
-///
-/// \headerfile <x86intrin.h>
-///
-/// \code
-/// __m128d _mm_shuffle_pd(__m128d a, __m128d b, const int i);
-/// \endcode
-///
-/// This intrinsic corresponds to the <c> VSHUFPD / SHUFPD </c> instruction.
-///
-/// \param a
-///    A 128-bit vector of [2 x double].
-/// \param b
-///    A 128-bit vector of [2 x double].
-/// \param i
-///    An 8-bit immediate value. The least significant two bits specify which
-///    elements to copy from \a a and \a b: \n
-///    Bit[0] = 0: lower element of \a a copied to lower element of result. \n
-///    Bit[0] = 1: upper element of \a a copied to lower element of result. \n
-///    Bit[1] = 0: lower element of \a b copied to upper element of result. \n
-///    Bit[1] = 1: upper element of \a b copied to upper element of result. \n
-/// \returns A 128-bit vector of [2 x double] containing the shuffled values.
-#define _mm_shuffle_pd(a, b, i) \
-  (__m128d)__builtin_ia32_shufpd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), \
-                                 (int)(i))
-
-/// Casts a 128-bit floating-point vector of [2 x double] into a 128-bit
-///    floating-point vector of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit floating-point vector of [2 x double].
-/// \returns A 128-bit floating-point vector of [4 x float] containing the same
-///    bitwise pattern as the parameter.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_castpd_ps(__m128d __a)
-{
-  return (__m128)__a;
-}
-
-/// Casts a 128-bit floating-point vector of [2 x double] into a 128-bit
-///    integer vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit floating-point vector of [2 x double].
-/// \returns A 128-bit integer vector containing the same bitwise pattern as the
-///    parameter.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_castpd_si128(__m128d __a)
-{
-  return (__m128i)__a;
-}
-
-/// Casts a 128-bit floating-point vector of [4 x float] into a 128-bit
-///    floating-point vector of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit floating-point vector of [4 x float].
-/// \returns A 128-bit floating-point vector of [2 x double] containing the same
-///    bitwise pattern as the parameter.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_castps_pd(__m128 __a)
-{
-  return (__m128d)__a;
-}
-
-/// Casts a 128-bit floating-point vector of [4 x float] into a 128-bit
-///    integer vector.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit floating-point vector of [4 x float].
-/// \returns A 128-bit integer vector containing the same bitwise pattern as the
-///    parameter.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_castps_si128(__m128 __a)
-{
-  return (__m128i)__a;
-}
-
-/// Casts a 128-bit integer vector into a 128-bit floating-point vector
-///    of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \returns A 128-bit floating-point vector of [4 x float] containing the same
-///    bitwise pattern as the parameter.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_castsi128_ps(__m128i __a)
-{
-  return (__m128)__a;
-}
-
-/// Casts a 128-bit integer vector into a 128-bit floating-point vector
-///    of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic has no corresponding instruction.
-///
-/// \param __a
-///    A 128-bit integer vector.
-/// \returns A 128-bit floating-point vector of [2 x double] containing the same
-///    bitwise pattern as the parameter.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_castsi128_pd(__m128i __a)
-{
-  return (__m128d)__a;
-}
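/* Usage sketch (illustrative only; __example_fabs_pd is a hypothetical helper,
 * not part of this header): the cast intrinsics cost no instructions, so the
 * sign bits of both doubles can be cleared with integer masking. */
static __inline__ __m128d __DEFAULT_FN_ATTRS
__example_fabs_pd(__m128d __v)
{
  __m128i __mask = _mm_set1_epi64x(0x7FFFFFFFFFFFFFFFLL); /* all bits but the sign */
  return _mm_castsi128_pd(_mm_and_si128(_mm_castpd_si128(__v), __mask));
}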
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-/// Indicates that a spin loop is being executed for the purposes of
-///    optimizing power consumption during the loop.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> PAUSE </c> instruction.
-///
-void _mm_pause(void);
-
-#if defined(__cplusplus)
-} // extern "C"
-#endif
-#undef __DEFAULT_FN_ATTRS
-#undef __DEFAULT_FN_ATTRS_MMX
-
-#define _MM_SHUFFLE2(x, y) (((x) << 1) | (y))
-
-#define _MM_DENORMALS_ZERO_ON   (0x0040)
-#define _MM_DENORMALS_ZERO_OFF  (0x0000)
-
-#define _MM_DENORMALS_ZERO_MASK (0x0040)
-
-#define _MM_GET_DENORMALS_ZERO_MODE() (_mm_getcsr() & _MM_DENORMALS_ZERO_MASK)
-#define _MM_SET_DENORMALS_ZERO_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_DENORMALS_ZERO_MASK) | (x)))
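/* Usage sketch (illustrative only; __example_with_daz is a hypothetical helper,
 * not part of this header): enable denormals-are-zero around a flush-sensitive
 * computation, then restore the previous MXCSR setting. */
static __inline__ void __example_with_daz(void)
{
  unsigned int __saved = _MM_GET_DENORMALS_ZERO_MODE();
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
  /* ... SSE computation whose denormal inputs should be treated as zero ... */
  _MM_SET_DENORMALS_ZERO_MODE(__saved);
}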
-
-#endif /* __EMMINTRIN_H */
diff --git a/linux-x86/lib64/clang/10.0.1/include/fuzzer/FuzzedDataProvider.h b/linux-x86/lib64/clang/10.0.1/include/fuzzer/FuzzedDataProvider.h
deleted file mode 100644
index 59ef284..0000000
--- a/linux-x86/lib64/clang/10.0.1/include/fuzzer/FuzzedDataProvider.h
+++ /dev/null
@@ -1,299 +0,0 @@
-//===- FuzzedDataProvider.h - Utility header for fuzz targets ---*- C++ -* ===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-// A single-header library providing a utility class to break up an array of
-// bytes. Whenever run on the same input, provides the same output, as long as
-// its methods are called in the same order, with the same arguments.
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_
-#define LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_
-
-#include <algorithm>
-#include <climits>
-#include <cstddef>
-#include <cstdint>
-#include <cstring>
-#include <initializer_list>
-#include <string>
-#include <type_traits>
-#include <utility>
-#include <vector>
-
-// In addition to the comments below, the API is also briefly documented at
-// https://github.com/google/fuzzing/blob/master/docs/split-inputs.md#fuzzed-data-provider
-class FuzzedDataProvider {
- public:
-  // |data| is an array of length |size| that the FuzzedDataProvider wraps to
-  // provide more granular access. |data| must outlive the FuzzedDataProvider.
-  FuzzedDataProvider(const uint8_t *data, size_t size)
-      : data_ptr_(data), remaining_bytes_(size) {}
-  ~FuzzedDataProvider() = default;
-
-  // Returns a std::vector containing |num_bytes| of input data. If fewer than
-  // |num_bytes| of data remain, returns a shorter std::vector containing all
-  // of the data that's left. Can be used with any byte-sized type, such as
-  // char, unsigned char, uint8_t, etc.
-  template <typename T> std::vector<T> ConsumeBytes(size_t num_bytes) {
-    num_bytes = std::min(num_bytes, remaining_bytes_);
-    return ConsumeBytes<T>(num_bytes, num_bytes);
-  }
-
-  // Similar to |ConsumeBytes|, but also appends the terminator value at the end
-  // of the resulting vector. Useful when a mutable null-terminated C string is
-  // needed, though that is a rare case; prefer |ConsumeBytes| or
-  // |ConsumeBytesAsString| where possible.
-  template <typename T>
-  std::vector<T> ConsumeBytesWithTerminator(size_t num_bytes,
-                                            T terminator = 0) {
-    num_bytes = std::min(num_bytes, remaining_bytes_);
-    std::vector<T> result = ConsumeBytes<T>(num_bytes + 1, num_bytes);
-    result.back() = terminator;
-    return result;
-  }
-
-  // Returns a std::string containing |num_bytes| of input data. Using this and
-  // |.c_str()| on the resulting string is the best way to get an immutable
-  // null-terminated C string. If fewer than |num_bytes| of data remain, returns
-  // a shorter std::string containing all of the data that's left.
-  std::string ConsumeBytesAsString(size_t num_bytes) {
-    static_assert(sizeof(std::string::value_type) == sizeof(uint8_t),
-                  "ConsumeBytesAsString cannot convert the data to a string.");
-
-    num_bytes = std::min(num_bytes, remaining_bytes_);
-    std::string result(
-        reinterpret_cast<const std::string::value_type *>(data_ptr_),
-        num_bytes);
-    Advance(num_bytes);
-    return result;
-  }
-
-  // Returns a number in the range [min, max] by consuming bytes from the
-  // input data. The value might not be uniformly distributed in the given
-  // range. If there's no input data left, always returns |min|. |min| must
-  // be less than or equal to |max|.
-  template <typename T> T ConsumeIntegralInRange(T min, T max) {
-    static_assert(std::is_integral<T>::value, "An integral type is required.");
-    static_assert(sizeof(T) <= sizeof(uint64_t), "Unsupported integral type.");
-
-    if (min > max)
-      abort();
-
-    // Use the biggest type possible to hold the range and the result.
-    uint64_t range = static_cast<uint64_t>(max) - min;
-    uint64_t result = 0;
-    size_t offset = 0;
-
-    while (offset < sizeof(T) * CHAR_BIT && (range >> offset) > 0 &&
-           remaining_bytes_ != 0) {
-      // Pull bytes off the end of the seed data. Experimentally, this seems to
-      // allow the fuzzer to more easily explore the input space. This makes
-      // sense, since it works by modifying inputs that caused new code to run,
-      // and this data is often used to encode length of data read by
-      // |ConsumeBytes|. Separating out read lengths makes it easier to modify the
-      // contents of the data that is actually read.
-      --remaining_bytes_;
-      result = (result << CHAR_BIT) | data_ptr_[remaining_bytes_];
-      offset += CHAR_BIT;
-    }
-
-    // Avoid division by 0, in case |range + 1| results in overflow.
-    if (range != std::numeric_limits<decltype(range)>::max())
-      result = result % (range + 1);
-
-    return static_cast<T>(min + result);
-  }
-
-  // Returns a std::string of length from 0 to |max_length|. When it runs out of
-  // input data, returns what remains of the input. Designed to be more stable
-  // with respect to a fuzzer inserting characters than just picking a random
-  // length and then consuming that many bytes with |ConsumeBytes|.
-  std::string ConsumeRandomLengthString(size_t max_length) {
-    // Reads bytes from the start of |data_ptr_|. Maps "\\" to "\", and maps "\"
-    // followed by anything else to the end of the string. As a result of this
-    // logic, a fuzzer can insert characters into the string, and the string
-    // will be lengthened to include those new characters, resulting in a more
-    // stable fuzzer than picking the length of a string independently from
-    // picking its contents.
-    std::string result;
-
-    // Reserve the anticipated capacity to prevent several reallocations.
-    result.reserve(std::min(max_length, remaining_bytes_));
-    for (size_t i = 0; i < max_length && remaining_bytes_ != 0; ++i) {
-      char next = ConvertUnsignedToSigned<char>(data_ptr_[0]);
-      Advance(1);
-      if (next == '\\' && remaining_bytes_ != 0) {
-        next = ConvertUnsignedToSigned<char>(data_ptr_[0]);
-        Advance(1);
-        if (next != '\\')
-          break;
-      }
-      result += next;
-    }
-
-    result.shrink_to_fit();
-    return result;
-  }
-
-  // Returns a std::vector containing all remaining bytes of the input data.
-  template <typename T> std::vector<T> ConsumeRemainingBytes() {
-    return ConsumeBytes<T>(remaining_bytes_);
-  }
-
-  // Returns a std::string containing all remaining bytes of the input data.
-  // Prefer using |ConsumeRemainingBytes| unless you actually need a std::string
-  // object.
-  std::string ConsumeRemainingBytesAsString() {
-    return ConsumeBytesAsString(remaining_bytes_);
-  }
-
-  // Returns a number in the range [Type's min, Type's max]. The value might
-  // not be uniformly distributed in the given range. If there's no input data
-  // left, always returns the type's minimum value.
-  template <typename T> T ConsumeIntegral() {
-    return ConsumeIntegralInRange(std::numeric_limits<T>::min(),
-                                  std::numeric_limits<T>::max());
-  }
-
-  // Reads one byte and returns a bool, or false when no data remains.
-  bool ConsumeBool() { return 1 & ConsumeIntegral<uint8_t>(); }
-
-  // Returns a copy of the value selected from the given fixed-size |array|.
-  template <typename T, size_t size>
-  T PickValueInArray(const T (&array)[size]) {
-    static_assert(size > 0, "The array must be non empty.");
-    return array[ConsumeIntegralInRange<size_t>(0, size - 1)];
-  }
-
-  template <typename T>
-  T PickValueInArray(std::initializer_list<const T> list) {
-    // TODO(Dor1s): switch to static_assert once C++14 is allowed.
-    if (!list.size())
-      abort();
-
-    return *(list.begin() + ConsumeIntegralInRange<size_t>(0, list.size() - 1));
-  }
-
-  // Returns an enum value. The enum must start at 0 and be contiguous. It must
-  // also contain |kMaxValue| aliased to its largest (inclusive) value. Such as:
-  // enum class Foo { SomeValue, OtherValue, kMaxValue = OtherValue };
-  template <typename T> T ConsumeEnum() {
-    static_assert(std::is_enum<T>::value, "|T| must be an enum type.");
-    return static_cast<T>(ConsumeIntegralInRange<uint32_t>(
-        0, static_cast<uint32_t>(T::kMaxValue)));
-  }
-
-  // Returns a floating point number in the range [0.0, 1.0]. If there's no
-  // input data left, always returns 0.
-  template <typename T> T ConsumeProbability() {
-    static_assert(std::is_floating_point<T>::value,
-                  "A floating point type is required.");
-
-    // Use different integral types for different floating point types in order
-    // to provide better density of the resulting values.
-    using IntegralType =
-        typename std::conditional<sizeof(T) <= sizeof(uint32_t), uint32_t,
-                                  uint64_t>::type;
-
-    T result = static_cast<T>(ConsumeIntegral<IntegralType>());
-    result /= static_cast<T>(std::numeric_limits<IntegralType>::max());
-    return result;
-  }
-
-  // Returns a floating point value in the range [Type's lowest, Type's max] by
-  // consuming bytes from the input data. If there's no input data left, always
-  // returns approximately 0.
-  template <typename T> T ConsumeFloatingPoint() {
-    return ConsumeFloatingPointInRange<T>(std::numeric_limits<T>::lowest(),
-                                          std::numeric_limits<T>::max());
-  }
-
-  // Returns a floating point value in the given range by consuming bytes from
-  // the input data. If there's no input data left, returns |min|. Note that
-  // |min| must be less than or equal to |max|.
-  template <typename T> T ConsumeFloatingPointInRange(T min, T max) {
-    if (min > max)
-      abort();
-
-    T range = .0;
-    T result = min;
-    constexpr T zero(.0);
-    if (max > zero && min < zero && max > min + std::numeric_limits<T>::max()) {
-      // The diff |max - min| would overflow the given floating point type. Use
-      // half of the diff as the range and consume a bool to decide whether
-      // the result is in the first or the second part of the diff.
-      range = (max / 2.0) - (min / 2.0);
-      if (ConsumeBool()) {
-        result += range;
-      }
-    } else {
-      range = max - min;
-    }
-
-    return result + range * ConsumeProbability<T>();
-  }
-
-  // Reports the remaining bytes available for fuzzed input.
-  size_t remaining_bytes() { return remaining_bytes_; }
-
- private:
-  FuzzedDataProvider(const FuzzedDataProvider &) = delete;
-  FuzzedDataProvider &operator=(const FuzzedDataProvider &) = delete;
-
-  void Advance(size_t num_bytes) {
-    if (num_bytes > remaining_bytes_)
-      abort();
-
-    data_ptr_ += num_bytes;
-    remaining_bytes_ -= num_bytes;
-  }
-
-  template <typename T>
-  std::vector<T> ConsumeBytes(size_t size, size_t num_bytes_to_consume) {
-    static_assert(sizeof(T) == sizeof(uint8_t), "Incompatible data type.");
-
-    // The point of using the size-based constructor below is to increase the
-    // odds of having a vector object with capacity being equal to the length.
-    // That part is always implementation specific, but at least both libc++ and
-    // libstdc++ allocate the requested number of bytes in that constructor,
-    // which seems to be a natural choice for other implementations as well.
-    // To increase the odds even more, we also call |shrink_to_fit| below.
-    std::vector<T> result(size);
-    std::memcpy(result.data(), data_ptr_, num_bytes_to_consume);
-    Advance(num_bytes_to_consume);
-
-    // Even though |shrink_to_fit| is also implementation specific, we expect it
-    // to provide an additional assurance in case vector's constructor allocated
-    // a buffer which is larger than the actual amount of data we put inside it.
-    result.shrink_to_fit();
-    return result;
-  }
-
-  template <typename TS, typename TU> TS ConvertUnsignedToSigned(TU value) {
-    static_assert(sizeof(TS) == sizeof(TU), "Incompatible data types.");
-    static_assert(!std::numeric_limits<TU>::is_signed,
-                  "Source type must be unsigned.");
-
-    // TODO(Dor1s): change to `if constexpr` once C++17 becomes mainstream.
-    if (std::numeric_limits<TS>::is_modulo)
-      return static_cast<TS>(value);
-
-    // Avoid using implementation-defined unsigned to signed conversions.
-    // To learn more, see https://stackoverflow.com/questions/13150449.
-    if (value <= std::numeric_limits<TS>::max())
-      return static_cast<TS>(value);
-    else {
-      constexpr auto TS_min = std::numeric_limits<TS>::min();
-      return TS_min + static_cast<TS>(value - TS_min);
-    }
-  }
-
-  const uint8_t *data_ptr_;
-  size_t remaining_bytes_;
-};
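// Usage sketch (illustrative only, not part of this header; the names below
// are hypothetical): a minimal libFuzzer target that carves the raw input
// into typed values with FuzzedDataProvider.
//
//   extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
//     FuzzedDataProvider provider(data, size);
//     int depth = provider.ConsumeIntegralInRange<int>(0, 16);
//     bool strict_mode = provider.ConsumeBool();
//     std::string payload = provider.ConsumeRemainingBytesAsString();
//     // Exercise the code under test with (depth, strict_mode, payload).
//     return 0;
//   }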
-
-#endif // LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_
diff --git a/linux-x86/lib64/clang/10.0.1/include/immintrin.h b/linux-x86/lib64/clang/10.0.1/include/immintrin.h
deleted file mode 100644
index 7555ad8..0000000
--- a/linux-x86/lib64/clang/10.0.1/include/immintrin.h
+++ /dev/null
@@ -1,522 +0,0 @@
-/*===---- immintrin.h - Intel intrinsics -----------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __IMMINTRIN_H
-#define __IMMINTRIN_H
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__MMX__)
-#include <mmintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSE__)
-#include <xmmintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSE2__)
-#include <emmintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSE3__)
-#include <pmmintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SSSE3__)
-#include <tmmintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-    (defined(__SSE4_2__) || defined(__SSE4_1__))
-#include <smmintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-    (defined(__AES__) || defined(__PCLMUL__))
-#include <wmmintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__CLFLUSHOPT__)
-#include <clflushoptintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__CLWB__)
-#include <clwbintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX__)
-#include <avxintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX2__)
-#include <avx2intrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__F16C__)
-#include <f16cintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__VPCLMULQDQ__)
-#include <vpclmulqdqintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI__)
-#include <bmiintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI2__)
-#include <bmi2intrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__LZCNT__)
-#include <lzcntintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__POPCNT__)
-#include <popcntintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FMA__)
-#include <fmaintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512F__)
-#include <avx512fintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VL__)
-#include <avx512vlintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512BW__)
-#include <avx512bwintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512BITALG__)
-#include <avx512bitalgintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512CD__)
-#include <avx512cdintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VPOPCNTDQ__)
-#include <avx512vpopcntdqintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-    (defined(__AVX512VL__) && defined(__AVX512VPOPCNTDQ__))
-#include <avx512vpopcntdqvlintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VNNI__)
-#include <avx512vnniintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-    (defined(__AVX512VL__) && defined(__AVX512VNNI__))
-#include <avx512vlvnniintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512DQ__)
-#include <avx512dqintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-    (defined(__AVX512VL__) && defined(__AVX512BITALG__))
-#include <avx512vlbitalgintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-    (defined(__AVX512VL__) && defined(__AVX512BW__))
-#include <avx512vlbwintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-    (defined(__AVX512VL__) && defined(__AVX512CD__))
-#include <avx512vlcdintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-    (defined(__AVX512VL__) && defined(__AVX512DQ__))
-#include <avx512vldqintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512ER__)
-#include <avx512erintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512IFMA__)
-#include <avx512ifmaintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-    (defined(__AVX512IFMA__) && defined(__AVX512VL__))
-#include <avx512ifmavlintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VBMI__)
-#include <avx512vbmiintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-    (defined(__AVX512VBMI__) && defined(__AVX512VL__))
-#include <avx512vbmivlintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512VBMI2__)
-#include <avx512vbmi2intrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-    (defined(__AVX512VBMI2__) && defined(__AVX512VL__))
-#include <avx512vlvbmi2intrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512PF__)
-#include <avx512pfintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__AVX512BF16__)
-#include <avx512bf16intrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-    (defined(__AVX512VL__) && defined(__AVX512BF16__))
-#include <avx512vlbf16intrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__PKU__)
-#include <pkuintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__VAES__)
-#include <vaesintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__GFNI__)
-#include <gfniintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RDPID__)
-/// Returns the value of the IA32_TSC_AUX MSR (0xc0000103).
-///
-/// \headerfile <immintrin.h>
-///
-/// This intrinsic corresponds to the <c> RDPID </c> instruction.
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("rdpid")))
-_rdpid_u32(void) {
-  return __builtin_ia32_rdpid();
-}
-#endif // __RDPID__
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RDRND__)
-static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
-_rdrand16_step(unsigned short *__p)
-{
-  return __builtin_ia32_rdrand16_step(__p);
-}
-
-static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
-_rdrand32_step(unsigned int *__p)
-{
-  return __builtin_ia32_rdrand32_step(__p);
-}
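/* Usage sketch (illustrative only; __example_rdrand32_retry is a hypothetical
 * helper, not part of this header): _rdrand32_step reports success through its
 * return value, so retry a bounded number of times before falling back. */
static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
__example_rdrand32_retry(unsigned int *__out)
{
  int __i;
  for (__i = 0; __i < 10; ++__i)
    if (_rdrand32_step(__out))
      return 1; /* *__out holds a hardware random value */
  return 0;     /* entropy not ready; caller should fall back */
}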
-
-#ifdef __x86_64__
-static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
-_rdrand64_step(unsigned long long *__p)
-{
-  return __builtin_ia32_rdrand64_step(__p);
-}
-#endif
-#endif /* __RDRND__ */
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FSGSBASE__)
-#ifdef __x86_64__
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
-_readfsbase_u32(void)
-{
-  return __builtin_ia32_rdfsbase32();
-}
-
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
-_readfsbase_u64(void)
-{
-  return __builtin_ia32_rdfsbase64();
-}
-
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
-_readgsbase_u32(void)
-{
-  return __builtin_ia32_rdgsbase32();
-}
-
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
-_readgsbase_u64(void)
-{
-  return __builtin_ia32_rdgsbase64();
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
-_writefsbase_u32(unsigned int __V)
-{
-  __builtin_ia32_wrfsbase32(__V);
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
-_writefsbase_u64(unsigned long long __V)
-{
-  __builtin_ia32_wrfsbase64(__V);
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
-_writegsbase_u32(unsigned int __V)
-{
-  __builtin_ia32_wrgsbase32(__V);
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
-_writegsbase_u64(unsigned long long __V)
-{
-  __builtin_ia32_wrgsbase64(__V);
-}
-
-#endif
-#endif /* __FSGSBASE__ */
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__MOVBE__)
-
-/* The structs used below are to force the load/store to be unaligned. This
- * is accomplished with the __packed__ attribute. The __may_alias__ prevents
- * tbaa metadata from being generated based on the struct and the type of the
- * field inside of it.
- */
-
-static __inline__ short __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
-_loadbe_i16(void const * __P) {
-  struct __loadu_i16 {
-    short __v;
-  } __attribute__((__packed__, __may_alias__));
-  return __builtin_bswap16(((struct __loadu_i16*)__P)->__v);
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
-_storebe_i16(void * __P, short __D) {
-  struct __storeu_i16 {
-    short __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_i16*)__P)->__v = __builtin_bswap16(__D);
-}
-
-static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
-_loadbe_i32(void const * __P) {
-  struct __loadu_i32 {
-    int __v;
-  } __attribute__((__packed__, __may_alias__));
-  return __builtin_bswap32(((struct __loadu_i32*)__P)->__v);
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
-_storebe_i32(void * __P, int __D) {
-  struct __storeu_i32 {
-    int __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_i32*)__P)->__v = __builtin_bswap32(__D);
-}
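/* Usage sketch (illustrative only; __example_read_be32 is a hypothetical
 * helper, not part of this header): read a big-endian 32-bit field from an
 * unaligned buffer with a single byte-swapping load. */
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
__example_read_be32(const void *__p)
{
  return (unsigned int)_loadbe_i32(__p);
}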
-
-#ifdef __x86_64__
-static __inline__ long long __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
-_loadbe_i64(void const * __P) {
-  struct __loadu_i64 {
-    long long __v;
-  } __attribute__((__packed__, __may_alias__));
-  return __builtin_bswap64(((struct __loadu_i64*)__P)->__v);
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
-_storebe_i64(void * __P, long long __D) {
-  struct __storeu_i64 {
-    long long __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_i64*)__P)->__v = __builtin_bswap64(__D);
-}
-#endif
-#endif /* __MOVBE */
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RTM__)
-#include <rtmintrin.h>
-#include <xtestintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SHA__)
-#include <shaintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__FXSR__)
-#include <fxsrintrin.h>
-#endif
-
-/* No feature check desired due to internal MSC_VER checks */
-#include <xsaveintrin.h>
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XSAVEOPT__)
-#include <xsaveoptintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XSAVEC__)
-#include <xsavecintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__XSAVES__)
-#include <xsavesintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SHSTK__)
-#include <cetintrin.h>
-#endif
-
-/* Some intrinsics inside adxintrin.h are available only on processors with ADX,
- * whereas others are also available at all times. */
-#include <adxintrin.h>
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__RDSEED__)
-#include <rdseedintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__WBNOINVD__)
-#include <wbnoinvdintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__CLDEMOTE__)
-#include <cldemoteintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__WAITPKG__)
-#include <waitpkgintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-  defined(__MOVDIRI__) || defined(__MOVDIR64B__)
-#include <movdirintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__PCONFIG__)
-#include <pconfigintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__SGX__)
-#include <sgxintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__PTWRITE__)
-#include <ptwriteintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__INVPCID__)
-#include <invpcidintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-  defined(__AVX512VP2INTERSECT__)
-#include <avx512vp2intersectintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || \
-  (defined(__AVX512VL__) && defined(__AVX512VP2INTERSECT__))
-#include <avx512vlvp2intersectintrin.h>
-#endif
-
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__ENQCMD__)
-#include <enqcmdintrin.h>
-#endif
-
-#if defined(_MSC_VER) && __has_extension(gnu_asm)
-/* Define the default attributes for these intrinsics */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
-#ifdef __cplusplus
-extern "C" {
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Exchange HLE
-\*----------------------------------------------------------------------------*/
-#if defined(__i386__) || defined(__x86_64__)
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedExchange_HLEAcquire(long volatile *_Target, long _Value) {
-  __asm__ __volatile__(".byte 0xf2 ; lock ; xchg %0, %1"
-                       : "+r" (_Value), "+m" (*_Target) :: "memory");
-  return _Value;
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedExchange_HLERelease(long volatile *_Target, long _Value) {
-  __asm__ __volatile__(".byte 0xf3 ; lock ; xchg %0, %1"
-                       : "+r" (_Value), "+m" (*_Target) :: "memory");
-  return _Value;
-}
-#endif
-#if defined(__x86_64__)
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedExchange64_HLEAcquire(__int64 volatile *_Target, __int64 _Value) {
-  __asm__ __volatile__(".byte 0xf2 ; lock ; xchg %0, %1"
-                       : "+r" (_Value), "+m" (*_Target) :: "memory");
-  return _Value;
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedExchange64_HLERelease(__int64 volatile *_Target, __int64 _Value) {
-  __asm__ __volatile__(".byte 0xf3 ; lock ; xchg %0, %1"
-                       : "+r" (_Value), "+m" (*_Target) :: "memory");
-  return _Value;
-}
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Compare Exchange HLE
-\*----------------------------------------------------------------------------*/
-#if defined(__i386__) || defined(__x86_64__)
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange_HLEAcquire(long volatile *_Destination,
-                              long _Exchange, long _Comparand) {
-  __asm__ __volatile__(".byte 0xf2 ; lock ; cmpxchg %2, %1"
-                       : "+a" (_Comparand), "+m" (*_Destination)
-                       : "r" (_Exchange) : "memory");
-  return _Comparand;
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange_HLERelease(long volatile *_Destination,
-                              long _Exchange, long _Comparand) {
-  __asm__ __volatile__(".byte 0xf3 ; lock ; cmpxchg %2, %1"
-                       : "+a" (_Comparand), "+m" (*_Destination)
-                       : "r" (_Exchange) : "memory");
-  return _Comparand;
-}
-#endif
-#if defined(__x86_64__)
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange64_HLEAcquire(__int64 volatile *_Destination,
-                              __int64 _Exchange, __int64 _Comparand) {
-  __asm__ __volatile__(".byte 0xf2 ; lock ; cmpxchg %2, %1"
-                       : "+a" (_Comparand), "+m" (*_Destination)
-                       : "r" (_Exchange) : "memory");
-  return _Comparand;
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange64_HLERelease(__int64 volatile *_Destination,
-                              __int64 _Exchange, __int64 _Comparand) {
-  __asm__ __volatile__(".byte 0xf3 ; lock ; cmpxchg %2, %1"
-                       : "+a" (_Comparand), "+m" (*_Destination)
-                       : "r" (_Exchange) : "memory");
-  return _Comparand;
-}
-#endif
-#ifdef __cplusplus
-}
-#endif
-
-#undef __DEFAULT_FN_ATTRS
-
-#endif /* defined(_MSC_VER) && __has_extension(gnu_asm) */
-
-#endif /* __IMMINTRIN_H */
diff --git a/linux-x86/lib64/clang/10.0.1/include/intrin.h b/linux-x86/lib64/clang/10.0.1/include/intrin.h
deleted file mode 100644
index 9786ba1..0000000
--- a/linux-x86/lib64/clang/10.0.1/include/intrin.h
+++ /dev/null
@@ -1,589 +0,0 @@
-/* ===-------- intrin.h ---------------------------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-/* Only include this if we're compiling for the windows platform. */
-#ifndef _MSC_VER
-#include_next <intrin.h>
-#else
-
-#ifndef __INTRIN_H
-#define __INTRIN_H
-
-/* First include the standard intrinsics. */
-#if defined(__i386__) || defined(__x86_64__)
-#include <x86intrin.h>
-#endif
-
-#if defined(__arm__)
-#include <armintr.h>
-#endif
-
-#if defined(__aarch64__)
-#include <arm64intr.h>
-#endif
-
-/* For the definition of jmp_buf. */
-#if __STDC_HOSTED__
-#include <setjmp.h>
-#endif
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#if defined(__MMX__)
-/* And the random ones that aren't in those files. */
-__m64 _m_from_float(float);
-float _m_to_float(__m64);
-#endif
-
-/* Other assorted instruction intrinsics. */
-void __addfsbyte(unsigned long, unsigned char);
-void __addfsdword(unsigned long, unsigned long);
-void __addfsword(unsigned long, unsigned short);
-void __code_seg(const char *);
-static __inline__
-void __cpuid(int[4], int);
-static __inline__
-void __cpuidex(int[4], int, int);
-static __inline__
-__int64 __emul(int, int);
-static __inline__
-unsigned __int64 __emulu(unsigned int, unsigned int);
-unsigned int __getcallerseflags(void);
-static __inline__
-void __halt(void);
-unsigned char __inbyte(unsigned short);
-void __inbytestring(unsigned short, unsigned char *, unsigned long);
-void __incfsbyte(unsigned long);
-void __incfsdword(unsigned long);
-void __incfsword(unsigned long);
-unsigned long __indword(unsigned short);
-void __indwordstring(unsigned short, unsigned long *, unsigned long);
-void __int2c(void);
-void __invlpg(void *);
-unsigned short __inword(unsigned short);
-void __inwordstring(unsigned short, unsigned short *, unsigned long);
-void __lidt(void *);
-unsigned __int64 __ll_lshift(unsigned __int64, int);
-__int64 __ll_rshift(__int64, int);
-static __inline__
-void __movsb(unsigned char *, unsigned char const *, size_t);
-static __inline__
-void __movsd(unsigned long *, unsigned long const *, size_t);
-static __inline__
-void __movsw(unsigned short *, unsigned short const *, size_t);
-static __inline__
-void __nop(void);
-void __nvreg_restore_fence(void);
-void __nvreg_save_fence(void);
-void __outbyte(unsigned short, unsigned char);
-void __outbytestring(unsigned short, unsigned char *, unsigned long);
-void __outdword(unsigned short, unsigned long);
-void __outdwordstring(unsigned short, unsigned long *, unsigned long);
-void __outword(unsigned short, unsigned short);
-void __outwordstring(unsigned short, unsigned short *, unsigned long);
-unsigned long __readcr0(void);
-unsigned long __readcr2(void);
-static __inline__
-unsigned long __readcr3(void);
-unsigned long __readcr4(void);
-unsigned long __readcr8(void);
-unsigned int __readdr(unsigned int);
-#ifdef __i386__
-static __inline__
-unsigned char __readfsbyte(unsigned long);
-static __inline__
-unsigned __int64 __readfsqword(unsigned long);
-static __inline__
-unsigned short __readfsword(unsigned long);
-#endif
-static __inline__
-unsigned __int64 __readmsr(unsigned long);
-unsigned __int64 __readpmc(unsigned long);
-unsigned long __segmentlimit(unsigned long);
-void __sidt(void *);
-static __inline__
-void __stosb(unsigned char *, unsigned char, size_t);
-static __inline__
-void __stosd(unsigned long *, unsigned long, size_t);
-static __inline__
-void __stosw(unsigned short *, unsigned short, size_t);
-void __svm_clgi(void);
-void __svm_invlpga(void *, int);
-void __svm_skinit(int);
-void __svm_stgi(void);
-void __svm_vmload(size_t);
-void __svm_vmrun(size_t);
-void __svm_vmsave(size_t);
-void __ud2(void);
-unsigned __int64 __ull_rshift(unsigned __int64, int);
-void __vmx_off(void);
-void __vmx_vmptrst(unsigned __int64 *);
-void __wbinvd(void);
-void __writecr0(unsigned int);
-static __inline__
-void __writecr3(unsigned int);
-void __writecr4(unsigned int);
-void __writecr8(unsigned int);
-void __writedr(unsigned int, unsigned int);
-void __writefsbyte(unsigned long, unsigned char);
-void __writefsdword(unsigned long, unsigned long);
-void __writefsqword(unsigned long, unsigned __int64);
-void __writefsword(unsigned long, unsigned short);
-void __writemsr(unsigned long, unsigned __int64);
-static __inline__
-void *_AddressOfReturnAddress(void);
-static __inline__
-unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask);
-static __inline__
-unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask);
-unsigned char _bittest(long const *, long);
-unsigned char _bittestandcomplement(long *, long);
-unsigned char _bittestandreset(long *, long);
-unsigned char _bittestandset(long *, long);
-void __cdecl _disable(void);
-void __cdecl _enable(void);
-long _InterlockedAddLargeStatistic(__int64 volatile *_Addend, long _Value);
-unsigned char _interlockedbittestandreset(long volatile *, long);
-unsigned char _interlockedbittestandset(long volatile *, long);
-void *_InterlockedCompareExchangePointer_HLEAcquire(void *volatile *, void *,
-                                                    void *);
-void *_InterlockedCompareExchangePointer_HLERelease(void *volatile *, void *,
-                                                    void *);
-long _InterlockedExchangeAdd_HLEAcquire(long volatile *, long);
-long _InterlockedExchangeAdd_HLERelease(long volatile *, long);
-__int64 _InterlockedExchangeAdd64_HLEAcquire(__int64 volatile *, __int64);
-__int64 _InterlockedExchangeAdd64_HLERelease(__int64 volatile *, __int64);
-void __cdecl _invpcid(unsigned int, void *);
-static __inline__ void
-__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead")))
-_ReadBarrier(void);
-static __inline__ void
-__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead")))
-_ReadWriteBarrier(void);
-unsigned int _rorx_u32(unsigned int, const unsigned int);
-int _sarx_i32(int, unsigned int);
-#if __STDC_HOSTED__
-int __cdecl _setjmp(jmp_buf);
-#endif
-unsigned int _shlx_u32(unsigned int, unsigned int);
-unsigned int _shrx_u32(unsigned int, unsigned int);
-void _Store_HLERelease(long volatile *, long);
-void _Store64_HLERelease(__int64 volatile *, __int64);
-void _StorePointer_HLERelease(void *volatile *, void *);
-static __inline__ void
-__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead")))
-_WriteBarrier(void);
-unsigned __int32 xbegin(void);
-void _xend(void);
-
-/* These additional intrinsics are turned on in x64/amd64/x86_64 mode. */
-#ifdef __x86_64__
-void __addgsbyte(unsigned long, unsigned char);
-void __addgsdword(unsigned long, unsigned long);
-void __addgsqword(unsigned long, unsigned __int64);
-void __addgsword(unsigned long, unsigned short);
-static __inline__
-void __faststorefence(void);
-void __incgsbyte(unsigned long);
-void __incgsdword(unsigned long);
-void __incgsqword(unsigned long);
-void __incgsword(unsigned long);
-static __inline__
-void __movsq(unsigned long long *, unsigned long long const *, size_t);
-static __inline__
-unsigned char __readgsbyte(unsigned long);
-static __inline__
-unsigned long __readgsdword(unsigned long);
-static __inline__
-unsigned __int64 __readgsqword(unsigned long);
-unsigned short __readgsword(unsigned long);
-unsigned __int64 __shiftleft128(unsigned __int64 _LowPart,
-                                unsigned __int64 _HighPart,
-                                unsigned char _Shift);
-unsigned __int64 __shiftright128(unsigned __int64 _LowPart,
-                                 unsigned __int64 _HighPart,
-                                 unsigned char _Shift);
-static __inline__
-void __stosq(unsigned __int64 *, unsigned __int64, size_t);
-unsigned char __vmx_on(unsigned __int64 *);
-unsigned char __vmx_vmclear(unsigned __int64 *);
-unsigned char __vmx_vmlaunch(void);
-unsigned char __vmx_vmptrld(unsigned __int64 *);
-unsigned char __vmx_vmread(size_t, size_t *);
-unsigned char __vmx_vmresume(void);
-unsigned char __vmx_vmwrite(size_t, size_t);
-void __writegsbyte(unsigned long, unsigned char);
-void __writegsdword(unsigned long, unsigned long);
-void __writegsqword(unsigned long, unsigned __int64);
-void __writegsword(unsigned long, unsigned short);
-unsigned char _bittest64(__int64 const *, __int64);
-unsigned char _bittestandcomplement64(__int64 *, __int64);
-unsigned char _bittestandreset64(__int64 *, __int64);
-unsigned char _bittestandset64(__int64 *, __int64);
-long _InterlockedAnd_np(long volatile *_Value, long _Mask);
-short _InterlockedAnd16_np(short volatile *_Value, short _Mask);
-__int64 _InterlockedAnd64_np(__int64 volatile *_Value, __int64 _Mask);
-char _InterlockedAnd8_np(char volatile *_Value, char _Mask);
-unsigned char _interlockedbittestandreset64(__int64 volatile *, __int64);
-unsigned char _interlockedbittestandset64(__int64 volatile *, __int64);
-long _InterlockedCompareExchange_np(long volatile *_Destination, long _Exchange,
-                                    long _Comparand);
-unsigned char _InterlockedCompareExchange128(__int64 volatile *_Destination,
-                                             __int64 _ExchangeHigh,
-                                             __int64 _ExchangeLow,
-                                             __int64 *_CompareandResult);
-unsigned char _InterlockedCompareExchange128_np(__int64 volatile *_Destination,
-                                                __int64 _ExchangeHigh,
-                                                __int64 _ExchangeLow,
-                                                __int64 *_ComparandResult);
-short _InterlockedCompareExchange16_np(short volatile *_Destination,
-                                       short _Exchange, short _Comparand);
-__int64 _InterlockedCompareExchange64_np(__int64 volatile *_Destination,
-                                         __int64 _Exchange, __int64 _Comparand);
-void *_InterlockedCompareExchangePointer_np(void *volatile *_Destination,
-                                            void *_Exchange, void *_Comparand);
-long _InterlockedOr_np(long volatile *_Value, long _Mask);
-short _InterlockedOr16_np(short volatile *_Value, short _Mask);
-__int64 _InterlockedOr64_np(__int64 volatile *_Value, __int64 _Mask);
-char _InterlockedOr8_np(char volatile *_Value, char _Mask);
-long _InterlockedXor_np(long volatile *_Value, long _Mask);
-short _InterlockedXor16_np(short volatile *_Value, short _Mask);
-__int64 _InterlockedXor64_np(__int64 volatile *_Value, __int64 _Mask);
-char _InterlockedXor8_np(char volatile *_Value, char _Mask);
-unsigned __int64 _rorx_u64(unsigned __int64, const unsigned int);
-__int64 _sarx_i64(__int64, unsigned int);
-unsigned __int64 _shlx_u64(unsigned __int64, unsigned int);
-unsigned __int64 _shrx_u64(unsigned __int64, unsigned int);
-static __inline__
-__int64 __mulh(__int64, __int64);
-static __inline__
-unsigned __int64 __umulh(unsigned __int64, unsigned __int64);
-static __inline__
-__int64 _mul128(__int64, __int64, __int64*);
-static __inline__
-unsigned __int64 _umul128(unsigned __int64,
-                          unsigned __int64,
-                          unsigned __int64*);
-
-#endif /* __x86_64__ */
-
-#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
-
-static __inline__
-unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask);
-static __inline__
-unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask);
-
-static __inline__
-__int64 _InterlockedDecrement64(__int64 volatile *_Addend);
-static __inline__
-__int64 _InterlockedExchange64(__int64 volatile *_Target, __int64 _Value);
-static __inline__
-__int64 _InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value);
-static __inline__
-__int64 _InterlockedExchangeSub64(__int64 volatile *_Subend, __int64 _Value);
-static __inline__
-__int64 _InterlockedIncrement64(__int64 volatile *_Addend);
-static __inline__
-__int64 _InterlockedOr64(__int64 volatile *_Value, __int64 _Mask);
-static __inline__
-__int64 _InterlockedXor64(__int64 volatile *_Value, __int64 _Mask);
-static __inline__
-__int64 _InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask);
-
-#endif
-
-/*----------------------------------------------------------------------------*\
-|* Interlocked Exchange Add
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-char _InterlockedExchangeAdd8_acq(char volatile *_Addend, char _Value);
-char _InterlockedExchangeAdd8_nf(char volatile *_Addend, char _Value);
-char _InterlockedExchangeAdd8_rel(char volatile *_Addend, char _Value);
-short _InterlockedExchangeAdd16_acq(short volatile *_Addend, short _Value);
-short _InterlockedExchangeAdd16_nf(short volatile *_Addend, short _Value);
-short _InterlockedExchangeAdd16_rel(short volatile *_Addend, short _Value);
-long _InterlockedExchangeAdd_acq(long volatile *_Addend, long _Value);
-long _InterlockedExchangeAdd_nf(long volatile *_Addend, long _Value);
-long _InterlockedExchangeAdd_rel(long volatile *_Addend, long _Value);
-__int64 _InterlockedExchangeAdd64_acq(__int64 volatile *_Addend, __int64 _Value);
-__int64 _InterlockedExchangeAdd64_nf(__int64 volatile *_Addend, __int64 _Value);
-__int64 _InterlockedExchangeAdd64_rel(__int64 volatile *_Addend, __int64 _Value);
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Increment
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-short _InterlockedIncrement16_acq(short volatile *_Value);
-short _InterlockedIncrement16_nf(short volatile *_Value);
-short _InterlockedIncrement16_rel(short volatile *_Value);
-long _InterlockedIncrement_acq(long volatile *_Value);
-long _InterlockedIncrement_nf(long volatile *_Value);
-long _InterlockedIncrement_rel(long volatile *_Value);
-__int64 _InterlockedIncrement64_acq(__int64 volatile *_Value);
-__int64 _InterlockedIncrement64_nf(__int64 volatile *_Value);
-__int64 _InterlockedIncrement64_rel(__int64 volatile *_Value);
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Decrement
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-short _InterlockedDecrement16_acq(short volatile *_Value);
-short _InterlockedDecrement16_nf(short volatile *_Value);
-short _InterlockedDecrement16_rel(short volatile *_Value);
-long _InterlockedDecrement_acq(long volatile *_Value);
-long _InterlockedDecrement_nf(long volatile *_Value);
-long _InterlockedDecrement_rel(long volatile *_Value);
-__int64 _InterlockedDecrement64_acq(__int64 volatile *_Value);
-__int64 _InterlockedDecrement64_nf(__int64 volatile *_Value);
-__int64 _InterlockedDecrement64_rel(__int64 volatile *_Value);
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked And
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-char _InterlockedAnd8_acq(char volatile *_Value, char _Mask);
-char _InterlockedAnd8_nf(char volatile *_Value, char _Mask);
-char _InterlockedAnd8_rel(char volatile *_Value, char _Mask);
-short _InterlockedAnd16_acq(short volatile *_Value, short _Mask);
-short _InterlockedAnd16_nf(short volatile *_Value, short _Mask);
-short _InterlockedAnd16_rel(short volatile *_Value, short _Mask);
-long _InterlockedAnd_acq(long volatile *_Value, long _Mask);
-long _InterlockedAnd_nf(long volatile *_Value, long _Mask);
-long _InterlockedAnd_rel(long volatile *_Value, long _Mask);
-__int64 _InterlockedAnd64_acq(__int64 volatile *_Value, __int64 _Mask);
-__int64 _InterlockedAnd64_nf(__int64 volatile *_Value, __int64 _Mask);
-__int64 _InterlockedAnd64_rel(__int64 volatile *_Value, __int64 _Mask);
-#endif
-/*----------------------------------------------------------------------------*\
-|* Bit Counting and Testing
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-unsigned char _interlockedbittestandset_acq(long volatile *_BitBase,
-                                            long _BitPos);
-unsigned char _interlockedbittestandset_nf(long volatile *_BitBase,
-                                           long _BitPos);
-unsigned char _interlockedbittestandset_rel(long volatile *_BitBase,
-                                            long _BitPos);
-unsigned char _interlockedbittestandreset_acq(long volatile *_BitBase,
-                                              long _BitPos);
-unsigned char _interlockedbittestandreset_nf(long volatile *_BitBase,
-                                             long _BitPos);
-unsigned char _interlockedbittestandreset_rel(long volatile *_BitBase,
-                                              long _BitPos);
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Or
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-char _InterlockedOr8_acq(char volatile *_Value, char _Mask);
-char _InterlockedOr8_nf(char volatile *_Value, char _Mask);
-char _InterlockedOr8_rel(char volatile *_Value, char _Mask);
-short _InterlockedOr16_acq(short volatile *_Value, short _Mask);
-short _InterlockedOr16_nf(short volatile *_Value, short _Mask);
-short _InterlockedOr16_rel(short volatile *_Value, short _Mask);
-long _InterlockedOr_acq(long volatile *_Value, long _Mask);
-long _InterlockedOr_nf(long volatile *_Value, long _Mask);
-long _InterlockedOr_rel(long volatile *_Value, long _Mask);
-__int64 _InterlockedOr64_acq(__int64 volatile *_Value, __int64 _Mask);
-__int64 _InterlockedOr64_nf(__int64 volatile *_Value, __int64 _Mask);
-__int64 _InterlockedOr64_rel(__int64 volatile *_Value, __int64 _Mask);
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Xor
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-char _InterlockedXor8_acq(char volatile *_Value, char _Mask);
-char _InterlockedXor8_nf(char volatile *_Value, char _Mask);
-char _InterlockedXor8_rel(char volatile *_Value, char _Mask);
-short _InterlockedXor16_acq(short volatile *_Value, short _Mask);
-short _InterlockedXor16_nf(short volatile *_Value, short _Mask);
-short _InterlockedXor16_rel(short volatile *_Value, short _Mask);
-long _InterlockedXor_acq(long volatile *_Value, long _Mask);
-long _InterlockedXor_nf(long volatile *_Value, long _Mask);
-long _InterlockedXor_rel(long volatile *_Value, long _Mask);
-__int64 _InterlockedXor64_acq(__int64 volatile *_Value, __int64 _Mask);
-__int64 _InterlockedXor64_nf(__int64 volatile *_Value, __int64 _Mask);
-__int64 _InterlockedXor64_rel(__int64 volatile *_Value, __int64 _Mask);
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Exchange
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-char _InterlockedExchange8_acq(char volatile *_Target, char _Value);
-char _InterlockedExchange8_nf(char volatile *_Target, char _Value);
-char _InterlockedExchange8_rel(char volatile *_Target, char _Value);
-short _InterlockedExchange16_acq(short volatile *_Target, short _Value);
-short _InterlockedExchange16_nf(short volatile *_Target, short _Value);
-short _InterlockedExchange16_rel(short volatile *_Target, short _Value);
-long _InterlockedExchange_acq(long volatile *_Target, long _Value);
-long _InterlockedExchange_nf(long volatile *_Target, long _Value);
-long _InterlockedExchange_rel(long volatile *_Target, long _Value);
-__int64 _InterlockedExchange64_acq(__int64 volatile *_Target, __int64 _Value);
-__int64 _InterlockedExchange64_nf(__int64 volatile *_Target, __int64 _Value);
-__int64 _InterlockedExchange64_rel(__int64 volatile *_Target, __int64 _Value);
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Compare Exchange
-\*----------------------------------------------------------------------------*/
-#if defined(__arm__) || defined(__aarch64__)
-char _InterlockedCompareExchange8_acq(char volatile *_Destination,
-                             char _Exchange, char _Comparand);
-char _InterlockedCompareExchange8_nf(char volatile *_Destination,
-                             char _Exchange, char _Comparand);
-char _InterlockedCompareExchange8_rel(char volatile *_Destination,
-                             char _Exchange, char _Comparand);
-short _InterlockedCompareExchange16_acq(short volatile *_Destination,
-                              short _Exchange, short _Comparand);
-short _InterlockedCompareExchange16_nf(short volatile *_Destination,
-                              short _Exchange, short _Comparand);
-short _InterlockedCompareExchange16_rel(short volatile *_Destination,
-                              short _Exchange, short _Comparand);
-long _InterlockedCompareExchange_acq(long volatile *_Destination,
-                              long _Exchange, long _Comparand);
-long _InterlockedCompareExchange_nf(long volatile *_Destination,
-                              long _Exchange, long _Comparand);
-long _InterlockedCompareExchange_rel(long volatile *_Destination,
-                              long _Exchange, long _Comparand);
-__int64 _InterlockedCompareExchange64_acq(__int64 volatile *_Destination,
-                              __int64 _Exchange, __int64 _Comparand);
-__int64 _InterlockedCompareExchange64_nf(__int64 volatile *_Destination,
-                              __int64 _Exchange, __int64 _Comparand);
-__int64 _InterlockedCompareExchange64_rel(__int64 volatile *_Destination,
-                              __int64 _Exchange, __int64 _Comparand);
-#endif
-
-/*----------------------------------------------------------------------------*\
-|* movs, stos
-\*----------------------------------------------------------------------------*/
-#if defined(__i386__) || defined(__x86_64__)
-static __inline__ void __DEFAULT_FN_ATTRS
-__movsb(unsigned char *__dst, unsigned char const *__src, size_t __n) {
-  __asm__ __volatile__("rep movsb" : "+D"(__dst), "+S"(__src), "+c"(__n)
-                       : : "memory");
-}
-static __inline__ void __DEFAULT_FN_ATTRS
-__movsd(unsigned long *__dst, unsigned long const *__src, size_t __n) {
-  __asm__ __volatile__("rep movsl" : "+D"(__dst), "+S"(__src), "+c"(__n)
-                       : : "memory");
-}
-static __inline__ void __DEFAULT_FN_ATTRS
-__movsw(unsigned short *__dst, unsigned short const *__src, size_t __n) {
-  __asm__ __volatile__("rep movsw" : "+D"(__dst), "+S"(__src), "+c"(__n)
-                       : : "memory");
-}
-static __inline__ void __DEFAULT_FN_ATTRS
-__stosd(unsigned long *__dst, unsigned long __x, size_t __n) {
-  __asm__ __volatile__("rep stosl" : "+D"(__dst), "+c"(__n) : "a"(__x)
-                       : "memory");
-}
-static __inline__ void __DEFAULT_FN_ATTRS
-__stosw(unsigned short *__dst, unsigned short __x, size_t __n) {
-  __asm__ __volatile__("rep stosw" : "+D"(__dst), "+c"(__n) : "a"(__x)
-                       : "memory");
-}
-#endif
-#ifdef __x86_64__
-static __inline__ void __DEFAULT_FN_ATTRS
-__movsq(unsigned long long *__dst, unsigned long long const *__src, size_t __n) {
-  __asm__ __volatile__("rep movsq" : "+D"(__dst), "+S"(__src), "+c"(__n)
-                       : : "memory");
-}
-static __inline__ void __DEFAULT_FN_ATTRS
-__stosq(unsigned __int64 *__dst, unsigned __int64 __x, size_t __n) {
-  __asm__ __volatile__("rep stosq" : "+D"(__dst), "+c"(__n) : "a"(__x)
-                       : "memory");
-}
-#endif
-
-/*----------------------------------------------------------------------------*\
-|* Misc
-\*----------------------------------------------------------------------------*/
-#if defined(__i386__) || defined(__x86_64__)
-static __inline__ void __DEFAULT_FN_ATTRS
-__cpuid(int __info[4], int __level) {
-  __asm__ ("cpuid" : "=a"(__info[0]), "=b" (__info[1]), "=c"(__info[2]), "=d"(__info[3])
-                   : "a"(__level), "c"(0));
-}
-static __inline__ void __DEFAULT_FN_ATTRS
-__cpuidex(int __info[4], int __level, int __ecx) {
-  __asm__ ("cpuid" : "=a"(__info[0]), "=b" (__info[1]), "=c"(__info[2]), "=d"(__info[3])
-                   : "a"(__level), "c"(__ecx));
-}
-static __inline__ void __DEFAULT_FN_ATTRS
-__halt(void) {
-  __asm__ volatile ("hlt");
-}
-#endif
-
-#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)
-static __inline__ void __DEFAULT_FN_ATTRS
-__nop(void) {
-  __asm__ volatile ("nop");
-}
-#endif
-
-/*----------------------------------------------------------------------------*\
-|* MS AArch64 specific
-\*----------------------------------------------------------------------------*/
-#if defined(__aarch64__)
-unsigned __int64 __getReg(int);
-long _InterlockedAdd(long volatile *Addend, long Value);
-__int64 _ReadStatusReg(int);
-void _WriteStatusReg(int, __int64);
-
-unsigned short __cdecl _byteswap_ushort(unsigned short val);
-unsigned long __cdecl _byteswap_ulong (unsigned long val);
-unsigned __int64 __cdecl _byteswap_uint64(unsigned __int64 val);
-#endif
-
-/*----------------------------------------------------------------------------*\
-|* Privileged intrinsics
-\*----------------------------------------------------------------------------*/
-#if defined(__i386__) || defined(__x86_64__)
-static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
-__readmsr(unsigned long __register) {
-  // Loads the contents of a 64-bit model specific register (MSR) specified in
-  // the ECX register into registers EDX:EAX. The EDX register is loaded with
-  // the high-order 32 bits of the MSR and the EAX register is loaded with the
-  // low-order 32 bits. If less than 64 bits are implemented in the MSR being
-  // read, the values returned to EDX:EAX in unimplemented bit locations are
-  // undefined.
-  unsigned long __edx;
-  unsigned long __eax;
-  __asm__ ("rdmsr" : "=d"(__edx), "=a"(__eax) : "c"(__register));
-  return (((unsigned __int64)__edx) << 32) | (unsigned __int64)__eax;
-}
-
-static __inline__ unsigned long __DEFAULT_FN_ATTRS
-__readcr3(void) {
-  unsigned long __cr3_val;
-  __asm__ __volatile__ ("mov %%cr3, %0" : "=q"(__cr3_val) : : "memory");
-  return __cr3_val;
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS
-__writecr3(unsigned int __cr3_val) {
-  __asm__ ("mov %0, %%cr3" : : "q"(__cr3_val) : "memory");
-}
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#undef __DEFAULT_FN_ATTRS
-
-#endif /* __INTRIN_H */
-#endif /* _MSC_VER */
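The intrin.h removed above wraps MSVC-style intrinsics for clang. As a rough usage sketch (an assumption-laden illustration, not part of the patch): on an x86/x86_64 target where clang defines _MSC_VER (for example x86_64-pc-windows-msvc), so this header is the one found on the include path, the __cpuid declaration above can be called as below. The vendor-string layout in EBX/EDX/ECX is standard CPUID behavior, not something defined by the header.

/* Hypothetical example; assumes a Windows-targeting x86 build of clang. */
#include <intrin.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  int info[4];                     /* EAX, EBX, ECX, EDX for the requested leaf */
  __cpuid(info, 0);                /* leaf 0: highest basic leaf + vendor string */

  char vendor[13];
  memcpy(vendor + 0, &info[1], 4); /* vendor bytes live in EBX, EDX, ECX */
  memcpy(vendor + 4, &info[3], 4);
  memcpy(vendor + 8, &info[2], 4);
  vendor[12] = '\0';

  printf("max basic leaf: %d, vendor: %s\n", info[0], vendor);
  return 0;
}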
diff --git a/linux-x86/lib64/clang/10.0.1/include/sanitizer/asan_interface.h b/linux-x86/lib64/clang/10.0.1/include/sanitizer/asan_interface.h
deleted file mode 100644
index ab2dc97..0000000
--- a/linux-x86/lib64/clang/10.0.1/include/sanitizer/asan_interface.h
+++ /dev/null
@@ -1,322 +0,0 @@
-//===-- sanitizer/asan_interface.h ------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of AddressSanitizer (ASan).
-//
-// Public interface header.
-//===----------------------------------------------------------------------===//
-#ifndef SANITIZER_ASAN_INTERFACE_H
-#define SANITIZER_ASAN_INTERFACE_H
-
-#include <sanitizer/common_interface_defs.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-/// Marks a memory region (<c>[addr, addr+size)</c>) as unaddressable.
-///
-/// This memory must be previously allocated by your program. Instrumented
-/// code is forbidden from accessing addresses in this region until it is
-/// unpoisoned. This function is not guaranteed to poison the entire region -
-/// it could poison only a subregion of <c>[addr, addr+size)</c> due to ASan
-/// alignment restrictions.
-///
-/// \note This function is not thread-safe because no two threads can poison or
-/// unpoison memory in the same memory region simultaneously.
-///
-/// \param addr Start of memory region.
-/// \param size Size of memory region.
-void __asan_poison_memory_region(void const volatile *addr, size_t size);
-
-/// Marks a memory region (<c>[addr, addr+size)</c>) as addressable.
-///
-/// This memory must be previously allocated by your program. Accessing
-/// addresses in this region is allowed until this region is poisoned again.
-/// This function could unpoison a super-region of <c>[addr, addr+size)</c> due
-/// to ASan alignment restrictions.
-///
-/// \note This function is not thread-safe because no two threads can
-/// poison or unpoison memory in the same memory region simultaneously.
-///
-/// \param addr Start of memory region.
-/// \param size Size of memory region.
-void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
-
-// Macros provided for convenience.
-#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
-/// Marks a memory region as unaddressable.
-///
-/// \note Macro provided for convenience; defined as a no-op if ASan is not
-/// enabled.
-///
-/// \param addr Start of memory region.
-/// \param size Size of memory region.
-#define ASAN_POISON_MEMORY_REGION(addr, size) \
-  __asan_poison_memory_region((addr), (size))
-
-/// Marks a memory region as addressable.
-///
-/// \note Macro provided for convenience; defined as a no-op if ASan is not
-/// enabled.
-///
-/// \param addr Start of memory region.
-/// \param size Size of memory region.
-#define ASAN_UNPOISON_MEMORY_REGION(addr, size) \
-  __asan_unpoison_memory_region((addr), (size))
-#else
-#define ASAN_POISON_MEMORY_REGION(addr, size) \
-  ((void)(addr), (void)(size))
-#define ASAN_UNPOISON_MEMORY_REGION(addr, size) \
-  ((void)(addr), (void)(size))
-#endif
-
-/// Checks if an address is poisoned.
-///
-/// Returns 1 if <c><i>addr</i></c> is poisoned (that is, 1-byte read/write
-/// access to this address would result in an error report from ASan).
-/// Otherwise returns 0.
-///
-/// \param addr Address to check.
-///
-/// \retval 1 Address is poisoned.
-/// \retval 0 Address is not poisoned.
-int __asan_address_is_poisoned(void const volatile *addr);
-
-/// Checks if a region is poisoned.
-///
-/// If at least one byte in <c>[beg, beg+size)</c> is poisoned, returns the
-/// address of the first such byte. Otherwise returns 0.
-///
-/// \param beg Start of memory region.
-/// \param size Size of memory region.
-/// \returns Address of first poisoned byte.
-void *__asan_region_is_poisoned(void *beg, size_t size);
-
-/// Describes an address (useful for calling from the debugger).
-///
-/// Prints the description of <c><i>addr</i></c>.
-///
-/// \param addr Address to describe.
-void __asan_describe_address(void *addr);
-
-/// Checks if an error has been or is being reported (useful for calling from
-/// the debugger to get information about an ASan error).
-///
-/// Returns 1 if an error has been (or is being) reported. Otherwise returns 0.
-///
-/// \returns 1 if an error has been (or is being) reported. Otherwise returns
-/// 0.
-int __asan_report_present(void);
-
-/// Gets the PC (program counter) register value of an ASan error (useful for
-/// calling from the debugger).
-///
-/// Returns PC if an error has been (or is being) reported.
-/// Otherwise returns 0.
-///
-/// \returns PC value.
-void *__asan_get_report_pc(void);
-
-/// Gets the BP (base pointer) register value of an ASan error (useful for
-/// calling from the debugger).
-///
-/// Returns BP if an error has been (or is being) reported.
-/// Otherwise returns 0.
-///
-/// \returns BP value.
-void *__asan_get_report_bp(void);
-
-/// Gets the SP (stack pointer) register value of an ASan error (useful for
-/// calling from the debugger).
-///
-/// If an error has been (or is being) reported, returns SP.
-/// Otherwise returns 0.
-///
-/// \returns SP value.
-void *__asan_get_report_sp(void);
-
-/// Gets the address of the report buffer of an ASan error (useful for calling
-/// from the debugger).
-///
-/// Returns the address of the report buffer if an error has been (or is being)
-/// reported. Otherwise returns 0.
-///
-/// \returns Address of report buffer.
-void *__asan_get_report_address(void);
-
-/// Gets access type of an ASan error (useful for calling from the debugger).
-///
-/// Returns access type (read or write) if an error has been (or is being)
-/// reported. Otherwise returns 0.
-///
-/// \returns Access type (0 = read, 1 = write).
-int __asan_get_report_access_type(void);
-
-/// Gets access size of an ASan error (useful for calling from the debugger).
-///
-/// Returns access size if an error has been (or is being) reported. Otherwise
-/// returns 0.
-///
-/// \returns Access size in bytes.
-size_t __asan_get_report_access_size(void);
-
-/// Gets the bug description of an ASan error (useful for calling from a
-/// debugger).
-///
-/// \returns Returns a bug description if an error has been (or is being)
-/// reported - for example, "heap-use-after-free". Otherwise returns an empty
-/// string.
-const char *__asan_get_report_description(void);
-
-/// Gets information about a pointer (useful for calling from the debugger).
-///
-/// Returns the category of the given pointer as a constant string.
-/// Possible return values are <c>global</c>, <c>stack</c>, <c>stack-fake</c>,
-/// <c>heap</c>, <c>heap-invalid</c>, <c>shadow-low</c>, <c>shadow-gap</c>,
-/// <c>shadow-high</c>, and <c>unknown</c>.
-///
-/// If the return value is <c>global</c> or <c>stack</c>, tries to also return
-/// the variable name, address, and size. If the return value is <c>heap</c>,
-/// tries to return the chunk address and size. <c><i>name</i></c> should point
-/// to an allocated buffer of size <c><i>name_size</i></c>.
-///
-/// \param addr Address to locate.
-/// \param name Buffer to store the variable's name.
-/// \param name_size Size in bytes of the variable's name buffer.
-/// \param region_address [out] Address of the region.
-/// \param region_size [out] Size of the region in bytes.
-///
-/// \returns Returns the category of the given pointer as a constant string.
-const char *__asan_locate_address(void *addr, char *name, size_t name_size,
-                                  void **region_address, size_t *region_size);
-
-/// Gets the allocation stack trace and thread ID for a heap address (useful
-/// for calling from the debugger).
-///
-/// Stores up to <c><i>size</i></c> frames in <c><i>trace</i></c>. Returns
-/// the number of stored frames or 0 on error.
-///
-/// \param addr A heap address.
-/// \param trace A buffer to store the stack trace.
-/// \param size Size in bytes of the trace buffer.
-/// \param thread_id [out] The thread ID of the address.
-///
-/// \returns Returns the number of stored frames or 0 on error.
-size_t __asan_get_alloc_stack(void *addr, void **trace, size_t size,
-                              int *thread_id);
-
-/// Gets the free stack trace and thread ID for a heap address (useful for
-/// calling from the debugger).
-///
-/// Stores up to <c><i>size</i></c> frames in <c><i>trace</i></c>. Returns
-/// the number of stored frames or 0 on error.
-///
-/// \param addr A heap address.
-/// \param trace A buffer to store the stack trace.
-/// \param size Size in bytes of the trace buffer.
-/// \param thread_id [out] The thread ID of the address.
-///
-/// \returns Returns the number of stored frames or 0 on error.
-size_t __asan_get_free_stack(void *addr, void **trace, size_t size,
-                             int *thread_id);
-
-/// Gets the current shadow memory mapping (useful for calling from the
-/// debugger).
-///
-/// \param shadow_scale [out] Shadow scale value.
-/// \param shadow_offset [out] Offset value.
-void __asan_get_shadow_mapping(size_t *shadow_scale, size_t *shadow_offset);
-
-/// This is an internal function that is called to report an error. However,
-/// it is still a part of the interface because you might want to set a
-/// breakpoint on this function in the debugger.
-///
-/// \param pc <c><i>pc</i></c> value of the ASan error.
-/// \param bp <c><i>bp</i></c> value of the ASan error.
-/// \param sp <c><i>sp</i></c> value of the ASan error.
-/// \param addr Address of the ASan error.
-/// \param is_write True if the error is a write error; false otherwise.
-/// \param access_size Size of the memory access of the ASan error.
-void __asan_report_error(void *pc, void *bp, void *sp,
-                         void *addr, int is_write, size_t access_size);
-
-// Deprecated. Call __sanitizer_set_death_callback instead.
-void __asan_set_death_callback(void (*callback)(void));
-
-/// Sets the callback function to be called during ASan error reporting.
-///
-/// The callback provides a string pointer to the report.
-///
-/// \param callback User-provided function.
-void __asan_set_error_report_callback(void (*callback)(const char *));
-
-/// User-provided callback on ASan errors.
-///
-/// You can provide a function that would be called immediately when ASan
-/// detects an error. This is useful in cases when ASan detects an error but
-/// your program crashes before the ASan report is printed.
-void __asan_on_error(void);
-
-/// Prints accumulated statistics to <c>stderr</c> (useful for calling from the
-/// debugger).
-void __asan_print_accumulated_stats(void);
-
-/// User-provided default option settings.
-///
-/// You can provide your own implementation of this function to return a string
-/// containing ASan runtime options (for example,
-/// <c>verbosity=1:halt_on_error=0</c>).
-///
-/// \returns Default options string.
-const char* __asan_default_options(void);
-
-// The following two functions facilitate garbage collection in presence of
-// ASan's fake stack.
-
-/// Gets an opaque handler to the current thread's fake stack.
-///
-/// Returns an opaque handler to be used by
-/// <c>__asan_addr_is_in_fake_stack()</c>. Returns NULL if the current thread
-/// does not have a fake stack.
-///
-/// \returns An opaque handler to the fake stack or NULL.
-void *__asan_get_current_fake_stack(void);
-
-/// Checks if an address belongs to a given fake stack.
-///
-/// If <c><i>fake_stack</i></c> is non-NULL and <c><i>addr</i></c> belongs to a
-/// fake frame in <c><i>fake_stack</i></c>, returns the address of the real
-/// stack that corresponds to the fake frame and sets <c><i>beg</i></c> and
-/// <c><i>end</i></c> to the boundaries of this fake frame. Otherwise returns
-/// NULL and does not touch <c><i>beg</i></c> and <c><i>end</i></c>.
-///
-/// If <c><i>beg</i></c> or <c><i>end</i></c> are NULL, they are not touched.
-///
-/// \note This function can be called from a thread other than the owner of
-/// <c><i>fake_stack</i></c>, but the owner thread needs to be alive.
-///
-/// \param fake_stack An opaque handler to a fake stack.
-/// \param addr Address to test.
-/// \param beg [out] Beginning of fake frame.
-/// \param end [out] End of fake frame.
-/// \returns Stack address or NULL.
-void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
-                                   void **end);
-
-/// Performs shadow memory cleanup of the current thread's stack before a
-/// function marked with the <c>[[noreturn]]</c> attribute is called.
-///
-/// To avoid false positives on the stack, this function must be called before
-/// no-return functions like <c>_exit()</c> and <c>execl()</c>.
-void __asan_handle_no_return(void);
-
-#ifdef __cplusplus
-}  // extern "C"
-#endif
-
-#endif  // SANITIZER_ASAN_INTERFACE_H
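The asan_interface.h removed above is ASan's public poisoning API. A minimal usage sketch of the ASAN_POISON_MEMORY_REGION / ASAN_UNPOISON_MEMORY_REGION macros declared there, assuming the program is built with -fsanitize=address so the macros expand to the real __asan_* calls (otherwise they compile to no-ops); the 128-byte buffer and 32-byte redzone are arbitrary example values.

/* Hypothetical example; assumes an ASan-instrumented build. */
#include <sanitizer/asan_interface.h>
#include <stdlib.h>

int main(void) {
  char *buf = malloc(128);
  if (!buf) return 1;

  /* Reserve the last 32 bytes as a manual redzone the program must not touch. */
  ASAN_POISON_MEMORY_REGION(buf + 96, 32);

  buf[0] = 'x';                           /* fine: still addressable */
  /* buf[100] = 'y';                         would be reported by ASan */

  /* Make the region addressable again before handing it back to free(). */
  ASAN_UNPOISON_MEMORY_REGION(buf + 96, 32);
  free(buf);
  return 0;
}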
diff --git a/linux-x86/lib64/clang/10.0.1/include/sanitizer/dfsan_interface.h b/linux-x86/lib64/clang/10.0.1/include/sanitizer/dfsan_interface.h
deleted file mode 100644
index c189ee5..0000000
--- a/linux-x86/lib64/clang/10.0.1/include/sanitizer/dfsan_interface.h
+++ /dev/null
@@ -1,121 +0,0 @@
-//===-- dfsan_interface.h -------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of DataFlowSanitizer.
-//
-// Public interface header.
-//===----------------------------------------------------------------------===//
-#ifndef DFSAN_INTERFACE_H
-#define DFSAN_INTERFACE_H
-
-#include <stddef.h>
-#include <stdint.h>
-#include <sanitizer/common_interface_defs.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef uint16_t dfsan_label;
-
-/// Stores information associated with a specific label identifier.  A label
-/// may be a base label created using dfsan_create_label, with associated
-/// text description and user data, or an automatically created union label,
-/// which represents the union of two label identifiers (which may themselves
-/// be base or union labels).
-struct dfsan_label_info {
-  // Fields for union labels, set to 0 for base labels.
-  dfsan_label l1;
-  dfsan_label l2;
-
-  // Fields for base labels.
-  const char *desc;
-  void *userdata;
-};
-
-/// Signature of the callback argument to dfsan_set_write_callback().
-typedef void (*dfsan_write_callback_t)(int fd, const void *buf, size_t count);
-
-/// Computes the union of \c l1 and \c l2, possibly creating a union label in
-/// the process.
-dfsan_label dfsan_union(dfsan_label l1, dfsan_label l2);
-
-/// Creates and returns a base label with the given description and user data.
-dfsan_label dfsan_create_label(const char *desc, void *userdata);
-
-/// Sets the label for each address in [addr,addr+size) to \c label.
-void dfsan_set_label(dfsan_label label, void *addr, size_t size);
-
-/// Sets the label for each address in [addr,addr+size) to the union of the
-/// current label for that address and \c label.
-void dfsan_add_label(dfsan_label label, void *addr, size_t size);
-
-/// Retrieves the label associated with the given data.
-///
-/// The type of 'data' is arbitrary.  The function accepts a value of any type,
-/// which can be truncated or extended (implicitly or explicitly) as necessary.
-/// The truncation/extension operations will preserve the label of the original
-/// value.
-dfsan_label dfsan_get_label(long data);
-
-/// Retrieves the label associated with the data at the given address.
-dfsan_label dfsan_read_label(const void *addr, size_t size);
-
-/// Retrieves a pointer to the dfsan_label_info struct for the given label.
-const struct dfsan_label_info *dfsan_get_label_info(dfsan_label label);
-
-/// Returns whether the given label \c label contains the label \c elem.
-int dfsan_has_label(dfsan_label label, dfsan_label elem);
-
-/// If the given label \c label contains a label with the description \c desc,
-/// returns that label, else returns 0.
-dfsan_label dfsan_has_label_with_desc(dfsan_label label, const char *desc);
-
-/// Returns the number of labels allocated.
-size_t dfsan_get_label_count(void);
-
-/// Flushes the DFSan shadow, i.e. forgets about all labels currently associated
-/// with the application memory. Will work only if there are no other
-/// threads executing DFSan-instrumented code concurrently.
-/// Use this call to start over the taint tracking within the same process.
-void dfsan_flush(void);
-
-/// Sets a callback to be invoked on calls to write().  The callback is invoked
-/// before the write is done.  The write is not guaranteed to succeed when the
-/// callback executes.  Pass in NULL to remove any callback.
-void dfsan_set_write_callback(dfsan_write_callback_t labeled_write_callback);
-
-/// Writes the labels currently used by the program to the given file
-/// descriptor. The lines of the output have the following format:
-///
-/// <label> <parent label 1> <parent label 2> <label description if any>
-void dfsan_dump_labels(int fd);
-
-/// Interceptor hooks.
-/// Whenever one of dfsan's custom functions is called, the corresponding
-/// hook is called if it is non-zero. The hooks should be defined by the user.
-/// The primary use case is taint-guided fuzzing, where the fuzzer
-/// needs to see the parameters of the function and the labels.
-/// FIXME: implement more hooks.
-void dfsan_weak_hook_memcmp(void *caller_pc, const void *s1, const void *s2,
-                            size_t n, dfsan_label s1_label,
-                            dfsan_label s2_label, dfsan_label n_label);
-void dfsan_weak_hook_strncmp(void *caller_pc, const char *s1, const char *s2,
-                             size_t n, dfsan_label s1_label,
-                             dfsan_label s2_label, dfsan_label n_label);
-#ifdef __cplusplus
-}  // extern "C"
-
-template <typename T>
-void dfsan_set_label(dfsan_label label, T &data) {  // NOLINT
-  dfsan_set_label(label, (void *)&data, sizeof(T));
-}
-
-#endif
-
-#endif  // DFSAN_INTERFACE_H
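The dfsan_interface.h removed above exposes DataFlowSanitizer's label API. A small sketch of tagging a value and checking label propagation through the functions declared there, assuming a target that supports -fsanitize=dataflow (x86_64 Linux) so the dfsan_* runtime is linked in; the label description "input" and the arithmetic are only illustrative.

/* Hypothetical example; assumes a DFSan-instrumented build. */
#include <sanitizer/dfsan_interface.h>
#include <assert.h>
#include <stddef.h>

int main(void) {
  int input = 42;

  /* Create a base label and attach it to the bytes of `input`. */
  dfsan_label lbl = dfsan_create_label("input", NULL);
  dfsan_set_label(lbl, &input, sizeof(input));

  /* Labels propagate through computation on the tainted value. */
  int derived = input + 1;
  assert(dfsan_has_label(dfsan_get_label(derived), lbl));
  return 0;
}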
diff --git a/linux-x86/lib64/clang/10.0.1/include/sanitizer/netbsd_syscall_hooks.h b/linux-x86/lib64/clang/10.0.1/include/sanitizer/netbsd_syscall_hooks.h
deleted file mode 100644
index 27780e0..0000000
--- a/linux-x86/lib64/clang/10.0.1/include/sanitizer/netbsd_syscall_hooks.h
+++ /dev/null
@@ -1,4731 +0,0 @@
-//===-- netbsd_syscall_hooks.h --------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of public sanitizer interface.
-//
-// System call handlers.
-//
-// Interface methods declared in this header implement pre- and post- syscall
-// actions for the active sanitizer.
-// Usage:
-//   __sanitizer_syscall_pre_getfoo(...args...);
-//   long long res = syscall(SYS_getfoo, ...args...);
-//   __sanitizer_syscall_post_getfoo(res, ...args...);
-//
-// DO NOT EDIT! THIS FILE HAS BEEN GENERATED!
-//
-// Generated with: generate_netbsd_syscalls.awk
-// Generated date: 2018-10-30
-// Generated from: syscalls.master,v 1.293 2018/07/31 13:00:13 rjs Exp
-//
-//===----------------------------------------------------------------------===//
-#ifndef SANITIZER_NETBSD_SYSCALL_HOOKS_H
-#define SANITIZER_NETBSD_SYSCALL_HOOKS_H
-
-#define __sanitizer_syscall_pre_syscall(code, arg0, arg1, arg2, arg3, arg4,    \
-                                        arg5, arg6, arg7)                      \
-  __sanitizer_syscall_pre_impl_syscall(                                        \
-      (long long)(code), (long long)(arg0), (long long)(arg1),                 \
-      (long long)(arg2), (long long)(arg3), (long long)(arg4),                 \
-      (long long)(arg5), (long long)(arg6), (long long)(arg7))
-#define __sanitizer_syscall_post_syscall(res, code, arg0, arg1, arg2, arg3,    \
-                                         arg4, arg5, arg6, arg7)               \
-  __sanitizer_syscall_post_impl_syscall(                                       \
-      res, (long long)(code), (long long)(arg0), (long long)(arg1),            \
-      (long long)(arg2), (long long)(arg3), (long long)(arg4),                 \
-      (long long)(arg5), (long long)(arg6), (long long)(arg7))
-#define __sanitizer_syscall_pre_exit(rval)                                     \
-  __sanitizer_syscall_pre_impl_exit((long long)(rval))
-#define __sanitizer_syscall_post_exit(res, rval)                               \
-  __sanitizer_syscall_post_impl_exit(res, (long long)(rval))
-#define __sanitizer_syscall_pre_fork() __sanitizer_syscall_pre_impl_fork()
-#define __sanitizer_syscall_post_fork(res)                                     \
-  __sanitizer_syscall_post_impl_fork(res)
-#define __sanitizer_syscall_pre_read(fd, buf, nbyte)                           \
-  __sanitizer_syscall_pre_impl_read((long long)(fd), (long long)(buf),         \
-                                    (long long)(nbyte))
-#define __sanitizer_syscall_post_read(res, fd, buf, nbyte)                     \
-  __sanitizer_syscall_post_impl_read(res, (long long)(fd), (long long)(buf),   \
-                                     (long long)(nbyte))
-#define __sanitizer_syscall_pre_write(fd, buf, nbyte)                          \
-  __sanitizer_syscall_pre_impl_write((long long)(fd), (long long)(buf),        \
-                                     (long long)(nbyte))
-#define __sanitizer_syscall_post_write(res, fd, buf, nbyte)                    \
-  __sanitizer_syscall_post_impl_write(res, (long long)(fd), (long long)(buf),  \
-                                      (long long)(nbyte))
-#define __sanitizer_syscall_pre_open(path, flags, mode)                        \
-  __sanitizer_syscall_pre_impl_open((long long)(path), (long long)(flags),     \
-                                    (long long)(mode))
-#define __sanitizer_syscall_post_open(res, path, flags, mode)                  \
-  __sanitizer_syscall_post_impl_open(res, (long long)(path),                   \
-                                     (long long)(flags), (long long)(mode))
-#define __sanitizer_syscall_pre_close(fd)                                      \
-  __sanitizer_syscall_pre_impl_close((long long)(fd))
-#define __sanitizer_syscall_post_close(res, fd)                                \
-  __sanitizer_syscall_post_impl_close(res, (long long)(fd))
-#define __sanitizer_syscall_pre_compat_50_wait4(pid, status, options, rusage)  \
-  __sanitizer_syscall_pre_impl_compat_50_wait4(                                \
-      (long long)(pid), (long long)(status), (long long)(options),             \
-      (long long)(rusage))
-#define __sanitizer_syscall_post_compat_50_wait4(res, pid, status, options,    \
-                                                 rusage)                       \
-  __sanitizer_syscall_post_impl_compat_50_wait4(                               \
-      res, (long long)(pid), (long long)(status), (long long)(options),        \
-      (long long)(rusage))
-#define __sanitizer_syscall_pre_compat_43_ocreat(path, mode)                   \
-  __sanitizer_syscall_pre_impl_compat_43_ocreat((long long)(path),             \
-                                                (long long)(mode))
-#define __sanitizer_syscall_post_compat_43_ocreat(res, path, mode)             \
-  __sanitizer_syscall_post_impl_compat_43_ocreat(res, (long long)(path),       \
-                                                 (long long)(mode))
-#define __sanitizer_syscall_pre_link(path, link)                               \
-  __sanitizer_syscall_pre_impl_link((long long)(path), (long long)(link))
-#define __sanitizer_syscall_post_link(res, path, link)                         \
-  __sanitizer_syscall_post_impl_link(res, (long long)(path), (long long)(link))
-#define __sanitizer_syscall_pre_unlink(path)                                   \
-  __sanitizer_syscall_pre_impl_unlink((long long)(path))
-#define __sanitizer_syscall_post_unlink(res, path)                             \
-  __sanitizer_syscall_post_impl_unlink(res, (long long)(path))
-/* syscall 11 has been skipped */
-#define __sanitizer_syscall_pre_chdir(path)                                    \
-  __sanitizer_syscall_pre_impl_chdir((long long)(path))
-#define __sanitizer_syscall_post_chdir(res, path)                              \
-  __sanitizer_syscall_post_impl_chdir(res, (long long)(path))
-#define __sanitizer_syscall_pre_fchdir(fd)                                     \
-  __sanitizer_syscall_pre_impl_fchdir((long long)(fd))
-#define __sanitizer_syscall_post_fchdir(res, fd)                               \
-  __sanitizer_syscall_post_impl_fchdir(res, (long long)(fd))
-#define __sanitizer_syscall_pre_compat_50_mknod(path, mode, dev)               \
-  __sanitizer_syscall_pre_impl_compat_50_mknod(                                \
-      (long long)(path), (long long)(mode), (long long)(dev))
-#define __sanitizer_syscall_post_compat_50_mknod(res, path, mode, dev)         \
-  __sanitizer_syscall_post_impl_compat_50_mknod(                               \
-      res, (long long)(path), (long long)(mode), (long long)(dev))
-#define __sanitizer_syscall_pre_chmod(path, mode)                              \
-  __sanitizer_syscall_pre_impl_chmod((long long)(path), (long long)(mode))
-#define __sanitizer_syscall_post_chmod(res, path, mode)                        \
-  __sanitizer_syscall_post_impl_chmod(res, (long long)(path), (long long)(mode))
-#define __sanitizer_syscall_pre_chown(path, uid, gid)                          \
-  __sanitizer_syscall_pre_impl_chown((long long)(path), (long long)(uid),      \
-                                     (long long)(gid))
-#define __sanitizer_syscall_post_chown(res, path, uid, gid)                    \
-  __sanitizer_syscall_post_impl_chown(res, (long long)(path),                  \
-                                      (long long)(uid), (long long)(gid))
-#define __sanitizer_syscall_pre_break(nsize)                                   \
-  __sanitizer_syscall_pre_impl_break((long long)(nsize))
-#define __sanitizer_syscall_post_break(res, nsize)                             \
-  __sanitizer_syscall_post_impl_break(res, (long long)(nsize))
-#define __sanitizer_syscall_pre_compat_20_getfsstat(buf, bufsize, flags)       \
-  __sanitizer_syscall_pre_impl_compat_20_getfsstat(                            \
-      (long long)(buf), (long long)(bufsize), (long long)(flags))
-#define __sanitizer_syscall_post_compat_20_getfsstat(res, buf, bufsize, flags) \
-  __sanitizer_syscall_post_impl_compat_20_getfsstat(                           \
-      res, (long long)(buf), (long long)(bufsize), (long long)(flags))
-#define __sanitizer_syscall_pre_compat_43_olseek(fd, offset, whence)           \
-  __sanitizer_syscall_pre_impl_compat_43_olseek(                               \
-      (long long)(fd), (long long)(offset), (long long)(whence))
-#define __sanitizer_syscall_post_compat_43_olseek(res, fd, offset, whence)     \
-  __sanitizer_syscall_post_impl_compat_43_olseek(                              \
-      res, (long long)(fd), (long long)(offset), (long long)(whence))
-#define __sanitizer_syscall_pre_getpid() __sanitizer_syscall_pre_impl_getpid()
-#define __sanitizer_syscall_post_getpid(res)                                   \
-  __sanitizer_syscall_post_impl_getpid(res)
-#define __sanitizer_syscall_pre_compat_40_mount(type, path, flags, data)       \
-  __sanitizer_syscall_pre_impl_compat_40_mount(                                \
-      (long long)(type), (long long)(path), (long long)(flags),                \
-      (long long)(data))
-#define __sanitizer_syscall_post_compat_40_mount(res, type, path, flags, data) \
-  __sanitizer_syscall_post_impl_compat_40_mount(                               \
-      res, (long long)(type), (long long)(path), (long long)(flags),           \
-      (long long)(data))
-#define __sanitizer_syscall_pre_unmount(path, flags)                           \
-  __sanitizer_syscall_pre_impl_unmount((long long)(path), (long long)(flags))
-#define __sanitizer_syscall_post_unmount(res, path, flags)                     \
-  __sanitizer_syscall_post_impl_unmount(res, (long long)(path),                \
-                                        (long long)(flags))
-#define __sanitizer_syscall_pre_setuid(uid)                                    \
-  __sanitizer_syscall_pre_impl_setuid((long long)(uid))
-#define __sanitizer_syscall_post_setuid(res, uid)                              \
-  __sanitizer_syscall_post_impl_setuid(res, (long long)(uid))
-#define __sanitizer_syscall_pre_getuid() __sanitizer_syscall_pre_impl_getuid()
-#define __sanitizer_syscall_post_getuid(res)                                   \
-  __sanitizer_syscall_post_impl_getuid(res)
-#define __sanitizer_syscall_pre_geteuid() __sanitizer_syscall_pre_impl_geteuid()
-#define __sanitizer_syscall_post_geteuid(res)                                  \
-  __sanitizer_syscall_post_impl_geteuid(res)
-#define __sanitizer_syscall_pre_ptrace(req, pid, addr, data)                   \
-  __sanitizer_syscall_pre_impl_ptrace((long long)(req), (long long)(pid),      \
-                                      (long long)(addr), (long long)(data))
-#define __sanitizer_syscall_post_ptrace(res, req, pid, addr, data)             \
-  __sanitizer_syscall_post_impl_ptrace(res, (long long)(req),                  \
-                                       (long long)(pid), (long long)(addr),    \
-                                       (long long)(data))
-#define __sanitizer_syscall_pre_recvmsg(s, msg, flags)                         \
-  __sanitizer_syscall_pre_impl_recvmsg((long long)(s), (long long)(msg),       \
-                                       (long long)(flags))
-#define __sanitizer_syscall_post_recvmsg(res, s, msg, flags)                   \
-  __sanitizer_syscall_post_impl_recvmsg(res, (long long)(s), (long long)(msg), \
-                                        (long long)(flags))
-#define __sanitizer_syscall_pre_sendmsg(s, msg, flags)                         \
-  __sanitizer_syscall_pre_impl_sendmsg((long long)(s), (long long)(msg),       \
-                                       (long long)(flags))
-#define __sanitizer_syscall_post_sendmsg(res, s, msg, flags)                   \
-  __sanitizer_syscall_post_impl_sendmsg(res, (long long)(s), (long long)(msg), \
-                                        (long long)(flags))
-#define __sanitizer_syscall_pre_recvfrom(s, buf, len, flags, from,             \
-                                         fromlenaddr)                          \
-  __sanitizer_syscall_pre_impl_recvfrom(                                       \
-      (long long)(s), (long long)(buf), (long long)(len), (long long)(flags),  \
-      (long long)(from), (long long)(fromlenaddr))
-#define __sanitizer_syscall_post_recvfrom(res, s, buf, len, flags, from,       \
-                                          fromlenaddr)                         \
-  __sanitizer_syscall_post_impl_recvfrom(                                      \
-      res, (long long)(s), (long long)(buf), (long long)(len),                 \
-      (long long)(flags), (long long)(from), (long long)(fromlenaddr))
-#define __sanitizer_syscall_pre_accept(s, name, anamelen)                      \
-  __sanitizer_syscall_pre_impl_accept((long long)(s), (long long)(name),       \
-                                      (long long)(anamelen))
-#define __sanitizer_syscall_post_accept(res, s, name, anamelen)                \
-  __sanitizer_syscall_post_impl_accept(res, (long long)(s), (long long)(name), \
-                                       (long long)(anamelen))
-#define __sanitizer_syscall_pre_getpeername(fdes, asa, alen)                   \
-  __sanitizer_syscall_pre_impl_getpeername(                                    \
-      (long long)(fdes), (long long)(asa), (long long)(alen))
-#define __sanitizer_syscall_post_getpeername(res, fdes, asa, alen)             \
-  __sanitizer_syscall_post_impl_getpeername(                                   \
-      res, (long long)(fdes), (long long)(asa), (long long)(alen))
-#define __sanitizer_syscall_pre_getsockname(fdes, asa, alen)                   \
-  __sanitizer_syscall_pre_impl_getsockname(                                    \
-      (long long)(fdes), (long long)(asa), (long long)(alen))
-#define __sanitizer_syscall_post_getsockname(res, fdes, asa, alen)             \
-  __sanitizer_syscall_post_impl_getsockname(                                   \
-      res, (long long)(fdes), (long long)(asa), (long long)(alen))
-#define __sanitizer_syscall_pre_access(path, flags)                            \
-  __sanitizer_syscall_pre_impl_access((long long)(path), (long long)(flags))
-#define __sanitizer_syscall_post_access(res, path, flags)                      \
-  __sanitizer_syscall_post_impl_access(res, (long long)(path),                 \
-                                       (long long)(flags))
-#define __sanitizer_syscall_pre_chflags(path, flags)                           \
-  __sanitizer_syscall_pre_impl_chflags((long long)(path), (long long)(flags))
-#define __sanitizer_syscall_post_chflags(res, path, flags)                     \
-  __sanitizer_syscall_post_impl_chflags(res, (long long)(path),                \
-                                        (long long)(flags))
-#define __sanitizer_syscall_pre_fchflags(fd, flags)                            \
-  __sanitizer_syscall_pre_impl_fchflags((long long)(fd), (long long)(flags))
-#define __sanitizer_syscall_post_fchflags(res, fd, flags)                      \
-  __sanitizer_syscall_post_impl_fchflags(res, (long long)(fd),                 \
-                                         (long long)(flags))
-#define __sanitizer_syscall_pre_sync() __sanitizer_syscall_pre_impl_sync()
-#define __sanitizer_syscall_post_sync(res)                                     \
-  __sanitizer_syscall_post_impl_sync(res)
-#define __sanitizer_syscall_pre_kill(pid, signum)                              \
-  __sanitizer_syscall_pre_impl_kill((long long)(pid), (long long)(signum))
-#define __sanitizer_syscall_post_kill(res, pid, signum)                        \
-  __sanitizer_syscall_post_impl_kill(res, (long long)(pid), (long long)(signum))
-#define __sanitizer_syscall_pre_compat_43_stat43(path, ub)                     \
-  __sanitizer_syscall_pre_impl_compat_43_stat43((long long)(path),             \
-                                                (long long)(ub))
-#define __sanitizer_syscall_post_compat_43_stat43(res, path, ub)               \
-  __sanitizer_syscall_post_impl_compat_43_stat43(res, (long long)(path),       \
-                                                 (long long)(ub))
-#define __sanitizer_syscall_pre_getppid() __sanitizer_syscall_pre_impl_getppid()
-#define __sanitizer_syscall_post_getppid(res)                                  \
-  __sanitizer_syscall_post_impl_getppid(res)
-#define __sanitizer_syscall_pre_compat_43_lstat43(path, ub)                    \
-  __sanitizer_syscall_pre_impl_compat_43_lstat43((long long)(path),            \
-                                                 (long long)(ub))
-#define __sanitizer_syscall_post_compat_43_lstat43(res, path, ub)              \
-  __sanitizer_syscall_post_impl_compat_43_lstat43(res, (long long)(path),      \
-                                                  (long long)(ub))
-#define __sanitizer_syscall_pre_dup(fd)                                        \
-  __sanitizer_syscall_pre_impl_dup((long long)(fd))
-#define __sanitizer_syscall_post_dup(res, fd)                                  \
-  __sanitizer_syscall_post_impl_dup(res, (long long)(fd))
-#define __sanitizer_syscall_pre_pipe() __sanitizer_syscall_pre_impl_pipe()
-#define __sanitizer_syscall_post_pipe(res)                                     \
-  __sanitizer_syscall_post_impl_pipe(res)
-#define __sanitizer_syscall_pre_getegid() __sanitizer_syscall_pre_impl_getegid()
-#define __sanitizer_syscall_post_getegid(res)                                  \
-  __sanitizer_syscall_post_impl_getegid(res)
-#define __sanitizer_syscall_pre_profil(samples, size, offset, scale)           \
-  __sanitizer_syscall_pre_impl_profil((long long)(samples), (long long)(size), \
-                                      (long long)(offset), (long long)(scale))
-#define __sanitizer_syscall_post_profil(res, samples, size, offset, scale)     \
-  __sanitizer_syscall_post_impl_profil(res, (long long)(samples),              \
-                                       (long long)(size), (long long)(offset), \
-                                       (long long)(scale))
-#define __sanitizer_syscall_pre_ktrace(fname, ops, facs, pid)                  \
-  __sanitizer_syscall_pre_impl_ktrace((long long)(fname), (long long)(ops),    \
-                                      (long long)(facs), (long long)(pid))
-#define __sanitizer_syscall_post_ktrace(res, fname, ops, facs, pid)            \
-  __sanitizer_syscall_post_impl_ktrace(res, (long long)(fname),                \
-                                       (long long)(ops), (long long)(facs),    \
-                                       (long long)(pid))
-#define __sanitizer_syscall_pre_compat_13_sigaction13(signum, nsa, osa)        \
-  __sanitizer_syscall_pre_impl_compat_13_sigaction13(                          \
-      (long long)(signum), (long long)(nsa), (long long)(osa))
-#define __sanitizer_syscall_post_compat_13_sigaction13(res, signum, nsa, osa)  \
-  __sanitizer_syscall_post_impl_compat_13_sigaction13(                         \
-      res, (long long)(signum), (long long)(nsa), (long long)(osa))
-#define __sanitizer_syscall_pre_getgid() __sanitizer_syscall_pre_impl_getgid()
-#define __sanitizer_syscall_post_getgid(res)                                   \
-  __sanitizer_syscall_post_impl_getgid(res)
-#define __sanitizer_syscall_pre_compat_13_sigprocmask13(how, mask)             \
-  __sanitizer_syscall_pre_impl_compat_13_sigprocmask13((long long)(how),       \
-                                                       (long long)(mask))
-#define __sanitizer_syscall_post_compat_13_sigprocmask13(res, how, mask)       \
-  __sanitizer_syscall_post_impl_compat_13_sigprocmask13(res, (long long)(how), \
-                                                        (long long)(mask))
-#define __sanitizer_syscall_pre___getlogin(namebuf, namelen)                   \
-  __sanitizer_syscall_pre_impl___getlogin((long long)(namebuf),                \
-                                          (long long)(namelen))
-#define __sanitizer_syscall_post___getlogin(res, namebuf, namelen)             \
-  __sanitizer_syscall_post_impl___getlogin(res, (long long)(namebuf),          \
-                                           (long long)(namelen))
-#define __sanitizer_syscall_pre___setlogin(namebuf)                            \
-  __sanitizer_syscall_pre_impl___setlogin((long long)(namebuf))
-#define __sanitizer_syscall_post___setlogin(res, namebuf)                      \
-  __sanitizer_syscall_post_impl___setlogin(res, (long long)(namebuf))
-#define __sanitizer_syscall_pre_acct(path)                                     \
-  __sanitizer_syscall_pre_impl_acct((long long)(path))
-#define __sanitizer_syscall_post_acct(res, path)                               \
-  __sanitizer_syscall_post_impl_acct(res, (long long)(path))
-#define __sanitizer_syscall_pre_compat_13_sigpending13()                       \
-  __sanitizer_syscall_pre_impl_compat_13_sigpending13()
-#define __sanitizer_syscall_post_compat_13_sigpending13(res)                   \
-  __sanitizer_syscall_post_impl_compat_13_sigpending13(res)
-#define __sanitizer_syscall_pre_compat_13_sigaltstack13(nss, oss)              \
-  __sanitizer_syscall_pre_impl_compat_13_sigaltstack13((long long)(nss),       \
-                                                       (long long)(oss))
-#define __sanitizer_syscall_post_compat_13_sigaltstack13(res, nss, oss)        \
-  __sanitizer_syscall_post_impl_compat_13_sigaltstack13(res, (long long)(nss), \
-                                                        (long long)(oss))
-#define __sanitizer_syscall_pre_ioctl(fd, com, data)                           \
-  __sanitizer_syscall_pre_impl_ioctl((long long)(fd), (long long)(com),        \
-                                     (long long)(data))
-#define __sanitizer_syscall_post_ioctl(res, fd, com, data)                     \
-  __sanitizer_syscall_post_impl_ioctl(res, (long long)(fd), (long long)(com),  \
-                                      (long long)(data))
-#define __sanitizer_syscall_pre_compat_12_oreboot(opt)                         \
-  __sanitizer_syscall_pre_impl_compat_12_oreboot((long long)(opt))
-#define __sanitizer_syscall_post_compat_12_oreboot(res, opt)                   \
-  __sanitizer_syscall_post_impl_compat_12_oreboot(res, (long long)(opt))
-#define __sanitizer_syscall_pre_revoke(path)                                   \
-  __sanitizer_syscall_pre_impl_revoke((long long)(path))
-#define __sanitizer_syscall_post_revoke(res, path)                             \
-  __sanitizer_syscall_post_impl_revoke(res, (long long)(path))
-#define __sanitizer_syscall_pre_symlink(path, link)                            \
-  __sanitizer_syscall_pre_impl_symlink((long long)(path), (long long)(link))
-#define __sanitizer_syscall_post_symlink(res, path, link)                      \
-  __sanitizer_syscall_post_impl_symlink(res, (long long)(path),                \
-                                        (long long)(link))
-#define __sanitizer_syscall_pre_readlink(path, buf, count)                     \
-  __sanitizer_syscall_pre_impl_readlink((long long)(path), (long long)(buf),   \
-                                        (long long)(count))
-#define __sanitizer_syscall_post_readlink(res, path, buf, count)               \
-  __sanitizer_syscall_post_impl_readlink(res, (long long)(path),               \
-                                         (long long)(buf), (long long)(count))
-#define __sanitizer_syscall_pre_execve(path, argp, envp)                       \
-  __sanitizer_syscall_pre_impl_execve((long long)(path), (long long)(argp),    \
-                                      (long long)(envp))
-#define __sanitizer_syscall_post_execve(res, path, argp, envp)                 \
-  __sanitizer_syscall_post_impl_execve(res, (long long)(path),                 \
-                                       (long long)(argp), (long long)(envp))
-#define __sanitizer_syscall_pre_umask(newmask)                                 \
-  __sanitizer_syscall_pre_impl_umask((long long)(newmask))
-#define __sanitizer_syscall_post_umask(res, newmask)                           \
-  __sanitizer_syscall_post_impl_umask(res, (long long)(newmask))
-#define __sanitizer_syscall_pre_chroot(path)                                   \
-  __sanitizer_syscall_pre_impl_chroot((long long)(path))
-#define __sanitizer_syscall_post_chroot(res, path)                             \
-  __sanitizer_syscall_post_impl_chroot(res, (long long)(path))
-#define __sanitizer_syscall_pre_compat_43_fstat43(fd, sb)                      \
-  __sanitizer_syscall_pre_impl_compat_43_fstat43((long long)(fd),              \
-                                                 (long long)(sb))
-#define __sanitizer_syscall_post_compat_43_fstat43(res, fd, sb)                \
-  __sanitizer_syscall_post_impl_compat_43_fstat43(res, (long long)(fd),        \
-                                                  (long long)(sb))
-#define __sanitizer_syscall_pre_compat_43_ogetkerninfo(op, where, size, arg)   \
-  __sanitizer_syscall_pre_impl_compat_43_ogetkerninfo(                         \
-      (long long)(op), (long long)(where), (long long)(size),                  \
-      (long long)(arg))
-#define __sanitizer_syscall_post_compat_43_ogetkerninfo(res, op, where, size,  \
-                                                        arg)                   \
-  __sanitizer_syscall_post_impl_compat_43_ogetkerninfo(                        \
-      res, (long long)(op), (long long)(where), (long long)(size),             \
-      (long long)(arg))
-#define __sanitizer_syscall_pre_compat_43_ogetpagesize()                       \
-  __sanitizer_syscall_pre_impl_compat_43_ogetpagesize()
-#define __sanitizer_syscall_post_compat_43_ogetpagesize(res)                   \
-  __sanitizer_syscall_post_impl_compat_43_ogetpagesize(res)
-#define __sanitizer_syscall_pre_compat_12_msync(addr, len)                     \
-  __sanitizer_syscall_pre_impl_compat_12_msync((long long)(addr),              \
-                                               (long long)(len))
-#define __sanitizer_syscall_post_compat_12_msync(res, addr, len)               \
-  __sanitizer_syscall_post_impl_compat_12_msync(res, (long long)(addr),        \
-                                                (long long)(len))
-#define __sanitizer_syscall_pre_vfork() __sanitizer_syscall_pre_impl_vfork()
-#define __sanitizer_syscall_post_vfork(res)                                    \
-  __sanitizer_syscall_post_impl_vfork(res)
-/* syscall 67 has been skipped */
-/* syscall 68 has been skipped */
-/* syscall 69 has been skipped */
-/* syscall 70 has been skipped */
-#define __sanitizer_syscall_pre_compat_43_ommap(addr, len, prot, flags, fd,    \
-                                                pos)                           \
-  __sanitizer_syscall_pre_impl_compat_43_ommap(                                \
-      (long long)(addr), (long long)(len), (long long)(prot),                  \
-      (long long)(flags), (long long)(fd), (long long)(pos))
-#define __sanitizer_syscall_post_compat_43_ommap(res, addr, len, prot, flags,  \
-                                                 fd, pos)                      \
-  __sanitizer_syscall_post_impl_compat_43_ommap(                               \
-      res, (long long)(addr), (long long)(len), (long long)(prot),             \
-      (long long)(flags), (long long)(fd), (long long)(pos))
-#define __sanitizer_syscall_pre_vadvise(anom)                                  \
-  __sanitizer_syscall_pre_impl_vadvise((long long)(anom))
-#define __sanitizer_syscall_post_vadvise(res, anom)                            \
-  __sanitizer_syscall_post_impl_vadvise(res, (long long)(anom))
-#define __sanitizer_syscall_pre_munmap(addr, len)                              \
-  __sanitizer_syscall_pre_impl_munmap((long long)(addr), (long long)(len))
-#define __sanitizer_syscall_post_munmap(res, addr, len)                        \
-  __sanitizer_syscall_post_impl_munmap(res, (long long)(addr), (long long)(len))
-#define __sanitizer_syscall_pre_mprotect(addr, len, prot)                      \
-  __sanitizer_syscall_pre_impl_mprotect((long long)(addr), (long long)(len),   \
-                                        (long long)(prot))
-#define __sanitizer_syscall_post_mprotect(res, addr, len, prot)                \
-  __sanitizer_syscall_post_impl_mprotect(res, (long long)(addr),               \
-                                         (long long)(len), (long long)(prot))
-#define __sanitizer_syscall_pre_madvise(addr, len, behav)                      \
-  __sanitizer_syscall_pre_impl_madvise((long long)(addr), (long long)(len),    \
-                                       (long long)(behav))
-#define __sanitizer_syscall_post_madvise(res, addr, len, behav)                \
-  __sanitizer_syscall_post_impl_madvise(res, (long long)(addr),                \
-                                        (long long)(len), (long long)(behav))
-/* syscall 76 has been skipped */
-/* syscall 77 has been skipped */
-#define __sanitizer_syscall_pre_mincore(addr, len, vec)                        \
-  __sanitizer_syscall_pre_impl_mincore((long long)(addr), (long long)(len),    \
-                                       (long long)(vec))
-#define __sanitizer_syscall_post_mincore(res, addr, len, vec)                  \
-  __sanitizer_syscall_post_impl_mincore(res, (long long)(addr),                \
-                                        (long long)(len), (long long)(vec))
-#define __sanitizer_syscall_pre_getgroups(gidsetsize, gidset)                  \
-  __sanitizer_syscall_pre_impl_getgroups((long long)(gidsetsize),              \
-                                         (long long)(gidset))
-#define __sanitizer_syscall_post_getgroups(res, gidsetsize, gidset)            \
-  __sanitizer_syscall_post_impl_getgroups(res, (long long)(gidsetsize),        \
-                                          (long long)(gidset))
-#define __sanitizer_syscall_pre_setgroups(gidsetsize, gidset)                  \
-  __sanitizer_syscall_pre_impl_setgroups((long long)(gidsetsize),              \
-                                         (long long)(gidset))
-#define __sanitizer_syscall_post_setgroups(res, gidsetsize, gidset)            \
-  __sanitizer_syscall_post_impl_setgroups(res, (long long)(gidsetsize),        \
-                                          (long long)(gidset))
-#define __sanitizer_syscall_pre_getpgrp() __sanitizer_syscall_pre_impl_getpgrp()
-#define __sanitizer_syscall_post_getpgrp(res)                                  \
-  __sanitizer_syscall_post_impl_getpgrp(res)
-#define __sanitizer_syscall_pre_setpgid(pid, pgid)                             \
-  __sanitizer_syscall_pre_impl_setpgid((long long)(pid), (long long)(pgid))
-#define __sanitizer_syscall_post_setpgid(res, pid, pgid)                       \
-  __sanitizer_syscall_post_impl_setpgid(res, (long long)(pid),                 \
-                                        (long long)(pgid))
-#define __sanitizer_syscall_pre_compat_50_setitimer(which, itv, oitv)          \
-  __sanitizer_syscall_pre_impl_compat_50_setitimer(                            \
-      (long long)(which), (long long)(itv), (long long)(oitv))
-#define __sanitizer_syscall_post_compat_50_setitimer(res, which, itv, oitv)    \
-  __sanitizer_syscall_post_impl_compat_50_setitimer(                           \
-      res, (long long)(which), (long long)(itv), (long long)(oitv))
-#define __sanitizer_syscall_pre_compat_43_owait()                              \
-  __sanitizer_syscall_pre_impl_compat_43_owait()
-#define __sanitizer_syscall_post_compat_43_owait(res)                          \
-  __sanitizer_syscall_post_impl_compat_43_owait(res)
-#define __sanitizer_syscall_pre_compat_12_oswapon(name)                        \
-  __sanitizer_syscall_pre_impl_compat_12_oswapon((long long)(name))
-#define __sanitizer_syscall_post_compat_12_oswapon(res, name)                  \
-  __sanitizer_syscall_post_impl_compat_12_oswapon(res, (long long)(name))
-#define __sanitizer_syscall_pre_compat_50_getitimer(which, itv)                \
-  __sanitizer_syscall_pre_impl_compat_50_getitimer((long long)(which),         \
-                                                   (long long)(itv))
-#define __sanitizer_syscall_post_compat_50_getitimer(res, which, itv)          \
-  __sanitizer_syscall_post_impl_compat_50_getitimer(res, (long long)(which),   \
-                                                    (long long)(itv))
-#define __sanitizer_syscall_pre_compat_43_ogethostname(hostname, len)          \
-  __sanitizer_syscall_pre_impl_compat_43_ogethostname((long long)(hostname),   \
-                                                      (long long)(len))
-#define __sanitizer_syscall_post_compat_43_ogethostname(res, hostname, len)    \
-  __sanitizer_syscall_post_impl_compat_43_ogethostname(                        \
-      res, (long long)(hostname), (long long)(len))
-#define __sanitizer_syscall_pre_compat_43_osethostname(hostname, len)          \
-  __sanitizer_syscall_pre_impl_compat_43_osethostname((long long)(hostname),   \
-                                                      (long long)(len))
-#define __sanitizer_syscall_post_compat_43_osethostname(res, hostname, len)    \
-  __sanitizer_syscall_post_impl_compat_43_osethostname(                        \
-      res, (long long)(hostname), (long long)(len))
-#define __sanitizer_syscall_pre_compat_43_ogetdtablesize()                     \
-  __sanitizer_syscall_pre_impl_compat_43_ogetdtablesize()
-#define __sanitizer_syscall_post_compat_43_ogetdtablesize(res)                 \
-  __sanitizer_syscall_post_impl_compat_43_ogetdtablesize(res)
-#define __sanitizer_syscall_pre_dup2(from, to)                                 \
-  __sanitizer_syscall_pre_impl_dup2((long long)(from), (long long)(to))
-#define __sanitizer_syscall_post_dup2(res, from, to)                           \
-  __sanitizer_syscall_post_impl_dup2(res, (long long)(from), (long long)(to))
-/* syscall 91 has been skipped */
-#define __sanitizer_syscall_pre_fcntl(fd, cmd, arg)                            \
-  __sanitizer_syscall_pre_impl_fcntl((long long)(fd), (long long)(cmd),        \
-                                     (long long)(arg))
-#define __sanitizer_syscall_post_fcntl(res, fd, cmd, arg)                      \
-  __sanitizer_syscall_post_impl_fcntl(res, (long long)(fd), (long long)(cmd),  \
-                                      (long long)(arg))
-#define __sanitizer_syscall_pre_compat_50_select(nd, in, ou, ex, tv)           \
-  __sanitizer_syscall_pre_impl_compat_50_select(                               \
-      (long long)(nd), (long long)(in), (long long)(ou), (long long)(ex),      \
-      (long long)(tv))
-#define __sanitizer_syscall_post_compat_50_select(res, nd, in, ou, ex, tv)     \
-  __sanitizer_syscall_post_impl_compat_50_select(                              \
-      res, (long long)(nd), (long long)(in), (long long)(ou), (long long)(ex), \
-      (long long)(tv))
-/* syscall 94 has been skipped */
-#define __sanitizer_syscall_pre_fsync(fd)                                      \
-  __sanitizer_syscall_pre_impl_fsync((long long)(fd))
-#define __sanitizer_syscall_post_fsync(res, fd)                                \
-  __sanitizer_syscall_post_impl_fsync(res, (long long)(fd))
-#define __sanitizer_syscall_pre_setpriority(which, who, prio)                  \
-  __sanitizer_syscall_pre_impl_setpriority(                                    \
-      (long long)(which), (long long)(who), (long long)(prio))
-#define __sanitizer_syscall_post_setpriority(res, which, who, prio)            \
-  __sanitizer_syscall_post_impl_setpriority(                                   \
-      res, (long long)(which), (long long)(who), (long long)(prio))
-#define __sanitizer_syscall_pre_compat_30_socket(domain, type, protocol)       \
-  __sanitizer_syscall_pre_impl_compat_30_socket(                               \
-      (long long)(domain), (long long)(type), (long long)(protocol))
-#define __sanitizer_syscall_post_compat_30_socket(res, domain, type, protocol) \
-  __sanitizer_syscall_post_impl_compat_30_socket(                              \
-      res, (long long)(domain), (long long)(type), (long long)(protocol))
-#define __sanitizer_syscall_pre_connect(s, name, namelen)                      \
-  __sanitizer_syscall_pre_impl_connect((long long)(s), (long long)(name),      \
-                                       (long long)(namelen))
-#define __sanitizer_syscall_post_connect(res, s, name, namelen)                \
-  __sanitizer_syscall_post_impl_connect(                                       \
-      res, (long long)(s), (long long)(name), (long long)(namelen))
-#define __sanitizer_syscall_pre_compat_43_oaccept(s, name, anamelen)           \
-  __sanitizer_syscall_pre_impl_compat_43_oaccept(                              \
-      (long long)(s), (long long)(name), (long long)(anamelen))
-#define __sanitizer_syscall_post_compat_43_oaccept(res, s, name, anamelen)     \
-  __sanitizer_syscall_post_impl_compat_43_oaccept(                             \
-      res, (long long)(s), (long long)(name), (long long)(anamelen))
-#define __sanitizer_syscall_pre_getpriority(which, who)                        \
-  __sanitizer_syscall_pre_impl_getpriority((long long)(which), (long long)(who))
-#define __sanitizer_syscall_post_getpriority(res, which, who)                  \
-  __sanitizer_syscall_post_impl_getpriority(res, (long long)(which),           \
-                                            (long long)(who))
-#define __sanitizer_syscall_pre_compat_43_osend(s, buf, len, flags)            \
-  __sanitizer_syscall_pre_impl_compat_43_osend(                                \
-      (long long)(s), (long long)(buf), (long long)(len), (long long)(flags))
-#define __sanitizer_syscall_post_compat_43_osend(res, s, buf, len, flags)      \
-  __sanitizer_syscall_post_impl_compat_43_osend(                               \
-      res, (long long)(s), (long long)(buf), (long long)(len),                 \
-      (long long)(flags))
-#define __sanitizer_syscall_pre_compat_43_orecv(s, buf, len, flags)            \
-  __sanitizer_syscall_pre_impl_compat_43_orecv(                                \
-      (long long)(s), (long long)(buf), (long long)(len), (long long)(flags))
-#define __sanitizer_syscall_post_compat_43_orecv(res, s, buf, len, flags)      \
-  __sanitizer_syscall_post_impl_compat_43_orecv(                               \
-      res, (long long)(s), (long long)(buf), (long long)(len),                 \
-      (long long)(flags))
-#define __sanitizer_syscall_pre_compat_13_sigreturn13(sigcntxp)                \
-  __sanitizer_syscall_pre_impl_compat_13_sigreturn13((long long)(sigcntxp))
-#define __sanitizer_syscall_post_compat_13_sigreturn13(res, sigcntxp)          \
-  __sanitizer_syscall_post_impl_compat_13_sigreturn13(res,                     \
-                                                      (long long)(sigcntxp))
-#define __sanitizer_syscall_pre_bind(s, name, namelen)                         \
-  __sanitizer_syscall_pre_impl_bind((long long)(s), (long long)(name),         \
-                                    (long long)(namelen))
-#define __sanitizer_syscall_post_bind(res, s, name, namelen)                   \
-  __sanitizer_syscall_post_impl_bind(res, (long long)(s), (long long)(name),   \
-                                     (long long)(namelen))
-#define __sanitizer_syscall_pre_setsockopt(s, level, name, val, valsize)       \
-  __sanitizer_syscall_pre_impl_setsockopt((long long)(s), (long long)(level),  \
-                                          (long long)(name), (long long)(val), \
-                                          (long long)(valsize))
-#define __sanitizer_syscall_post_setsockopt(res, s, level, name, val, valsize) \
-  __sanitizer_syscall_post_impl_setsockopt(                                    \
-      res, (long long)(s), (long long)(level), (long long)(name),              \
-      (long long)(val), (long long)(valsize))
-#define __sanitizer_syscall_pre_listen(s, backlog)                             \
-  __sanitizer_syscall_pre_impl_listen((long long)(s), (long long)(backlog))
-#define __sanitizer_syscall_post_listen(res, s, backlog)                       \
-  __sanitizer_syscall_post_impl_listen(res, (long long)(s),                    \
-                                       (long long)(backlog))
-/* syscall 107 has been skipped */
-#define __sanitizer_syscall_pre_compat_43_osigvec(signum, nsv, osv)            \
-  __sanitizer_syscall_pre_impl_compat_43_osigvec(                              \
-      (long long)(signum), (long long)(nsv), (long long)(osv))
-#define __sanitizer_syscall_post_compat_43_osigvec(res, signum, nsv, osv)      \
-  __sanitizer_syscall_post_impl_compat_43_osigvec(                             \
-      res, (long long)(signum), (long long)(nsv), (long long)(osv))
-#define __sanitizer_syscall_pre_compat_43_osigblock(mask)                      \
-  __sanitizer_syscall_pre_impl_compat_43_osigblock((long long)(mask))
-#define __sanitizer_syscall_post_compat_43_osigblock(res, mask)                \
-  __sanitizer_syscall_post_impl_compat_43_osigblock(res, (long long)(mask))
-#define __sanitizer_syscall_pre_compat_43_osigsetmask(mask)                    \
-  __sanitizer_syscall_pre_impl_compat_43_osigsetmask((long long)(mask))
-#define __sanitizer_syscall_post_compat_43_osigsetmask(res, mask)              \
-  __sanitizer_syscall_post_impl_compat_43_osigsetmask(res, (long long)(mask))
-#define __sanitizer_syscall_pre_compat_13_sigsuspend13(mask)                   \
-  __sanitizer_syscall_pre_impl_compat_13_sigsuspend13((long long)(mask))
-#define __sanitizer_syscall_post_compat_13_sigsuspend13(res, mask)             \
-  __sanitizer_syscall_post_impl_compat_13_sigsuspend13(res, (long long)(mask))
-#define __sanitizer_syscall_pre_compat_43_osigstack(nss, oss)                  \
-  __sanitizer_syscall_pre_impl_compat_43_osigstack((long long)(nss),           \
-                                                   (long long)(oss))
-#define __sanitizer_syscall_post_compat_43_osigstack(res, nss, oss)            \
-  __sanitizer_syscall_post_impl_compat_43_osigstack(res, (long long)(nss),     \
-                                                    (long long)(oss))
-#define __sanitizer_syscall_pre_compat_43_orecvmsg(s, msg, flags)              \
-  __sanitizer_syscall_pre_impl_compat_43_orecvmsg(                             \
-      (long long)(s), (long long)(msg), (long long)(flags))
-#define __sanitizer_syscall_post_compat_43_orecvmsg(res, s, msg, flags)        \
-  __sanitizer_syscall_post_impl_compat_43_orecvmsg(                            \
-      res, (long long)(s), (long long)(msg), (long long)(flags))
-#define __sanitizer_syscall_pre_compat_43_osendmsg(s, msg, flags)              \
-  __sanitizer_syscall_pre_impl_compat_43_osendmsg(                             \
-      (long long)(s), (long long)(msg), (long long)(flags))
-#define __sanitizer_syscall_post_compat_43_osendmsg(res, s, msg, flags)        \
-  __sanitizer_syscall_post_impl_compat_43_osendmsg(                            \
-      res, (long long)(s), (long long)(msg), (long long)(flags))
-/* syscall 115 has been skipped */
-#define __sanitizer_syscall_pre_compat_50_gettimeofday(tp, tzp)                \
-  __sanitizer_syscall_pre_impl_compat_50_gettimeofday((long long)(tp),         \
-                                                      (long long)(tzp))
-#define __sanitizer_syscall_post_compat_50_gettimeofday(res, tp, tzp)          \
-  __sanitizer_syscall_post_impl_compat_50_gettimeofday(res, (long long)(tp),   \
-                                                       (long long)(tzp))
-#define __sanitizer_syscall_pre_compat_50_getrusage(who, rusage)               \
-  __sanitizer_syscall_pre_impl_compat_50_getrusage((long long)(who),           \
-                                                   (long long)(rusage))
-#define __sanitizer_syscall_post_compat_50_getrusage(res, who, rusage)         \
-  __sanitizer_syscall_post_impl_compat_50_getrusage(res, (long long)(who),     \
-                                                    (long long)(rusage))
-#define __sanitizer_syscall_pre_getsockopt(s, level, name, val, avalsize)      \
-  __sanitizer_syscall_pre_impl_getsockopt((long long)(s), (long long)(level),  \
-                                          (long long)(name), (long long)(val), \
-                                          (long long)(avalsize))
-#define __sanitizer_syscall_post_getsockopt(res, s, level, name, val,          \
-                                            avalsize)                          \
-  __sanitizer_syscall_post_impl_getsockopt(                                    \
-      res, (long long)(s), (long long)(level), (long long)(name),              \
-      (long long)(val), (long long)(avalsize))
-/* syscall 119 has been skipped */
-#define __sanitizer_syscall_pre_readv(fd, iovp, iovcnt)                        \
-  __sanitizer_syscall_pre_impl_readv((long long)(fd), (long long)(iovp),       \
-                                     (long long)(iovcnt))
-#define __sanitizer_syscall_post_readv(res, fd, iovp, iovcnt)                  \
-  __sanitizer_syscall_post_impl_readv(res, (long long)(fd), (long long)(iovp), \
-                                      (long long)(iovcnt))
-#define __sanitizer_syscall_pre_writev(fd, iovp, iovcnt)                       \
-  __sanitizer_syscall_pre_impl_writev((long long)(fd), (long long)(iovp),      \
-                                      (long long)(iovcnt))
-#define __sanitizer_syscall_post_writev(res, fd, iovp, iovcnt)                 \
-  __sanitizer_syscall_post_impl_writev(res, (long long)(fd),                   \
-                                       (long long)(iovp), (long long)(iovcnt))
-#define __sanitizer_syscall_pre_compat_50_settimeofday(tv, tzp)                \
-  __sanitizer_syscall_pre_impl_compat_50_settimeofday((long long)(tv),         \
-                                                      (long long)(tzp))
-#define __sanitizer_syscall_post_compat_50_settimeofday(res, tv, tzp)          \
-  __sanitizer_syscall_post_impl_compat_50_settimeofday(res, (long long)(tv),   \
-                                                       (long long)(tzp))
-#define __sanitizer_syscall_pre_fchown(fd, uid, gid)                           \
-  __sanitizer_syscall_pre_impl_fchown((long long)(fd), (long long)(uid),       \
-                                      (long long)(gid))
-#define __sanitizer_syscall_post_fchown(res, fd, uid, gid)                     \
-  __sanitizer_syscall_post_impl_fchown(res, (long long)(fd), (long long)(uid), \
-                                       (long long)(gid))
-#define __sanitizer_syscall_pre_fchmod(fd, mode)                               \
-  __sanitizer_syscall_pre_impl_fchmod((long long)(fd), (long long)(mode))
-#define __sanitizer_syscall_post_fchmod(res, fd, mode)                         \
-  __sanitizer_syscall_post_impl_fchmod(res, (long long)(fd), (long long)(mode))
-#define __sanitizer_syscall_pre_compat_43_orecvfrom(s, buf, len, flags, from,  \
-                                                    fromlenaddr)               \
-  __sanitizer_syscall_pre_impl_compat_43_orecvfrom(                            \
-      (long long)(s), (long long)(buf), (long long)(len), (long long)(flags),  \
-      (long long)(from), (long long)(fromlenaddr))
-#define __sanitizer_syscall_post_compat_43_orecvfrom(res, s, buf, len, flags,  \
-                                                     from, fromlenaddr)        \
-  __sanitizer_syscall_post_impl_compat_43_orecvfrom(                           \
-      res, (long long)(s), (long long)(buf), (long long)(len),                 \
-      (long long)(flags), (long long)(from), (long long)(fromlenaddr))
-#define __sanitizer_syscall_pre_setreuid(ruid, euid)                           \
-  __sanitizer_syscall_pre_impl_setreuid((long long)(ruid), (long long)(euid))
-#define __sanitizer_syscall_post_setreuid(res, ruid, euid)                     \
-  __sanitizer_syscall_post_impl_setreuid(res, (long long)(ruid),               \
-                                         (long long)(euid))
-#define __sanitizer_syscall_pre_setregid(rgid, egid)                           \
-  __sanitizer_syscall_pre_impl_setregid((long long)(rgid), (long long)(egid))
-#define __sanitizer_syscall_post_setregid(res, rgid, egid)                     \
-  __sanitizer_syscall_post_impl_setregid(res, (long long)(rgid),               \
-                                         (long long)(egid))
-#define __sanitizer_syscall_pre_rename(from, to)                               \
-  __sanitizer_syscall_pre_impl_rename((long long)(from), (long long)(to))
-#define __sanitizer_syscall_post_rename(res, from, to)                         \
-  __sanitizer_syscall_post_impl_rename(res, (long long)(from), (long long)(to))
-#define __sanitizer_syscall_pre_compat_43_otruncate(path, length)              \
-  __sanitizer_syscall_pre_impl_compat_43_otruncate((long long)(path),          \
-                                                   (long long)(length))
-#define __sanitizer_syscall_post_compat_43_otruncate(res, path, length)        \
-  __sanitizer_syscall_post_impl_compat_43_otruncate(res, (long long)(path),    \
-                                                    (long long)(length))
-#define __sanitizer_syscall_pre_compat_43_oftruncate(fd, length)               \
-  __sanitizer_syscall_pre_impl_compat_43_oftruncate((long long)(fd),           \
-                                                    (long long)(length))
-#define __sanitizer_syscall_post_compat_43_oftruncate(res, fd, length)         \
-  __sanitizer_syscall_post_impl_compat_43_oftruncate(res, (long long)(fd),     \
-                                                     (long long)(length))
-#define __sanitizer_syscall_pre_flock(fd, how)                                 \
-  __sanitizer_syscall_pre_impl_flock((long long)(fd), (long long)(how))
-#define __sanitizer_syscall_post_flock(res, fd, how)                           \
-  __sanitizer_syscall_post_impl_flock(res, (long long)(fd), (long long)(how))
-#define __sanitizer_syscall_pre_mkfifo(path, mode)                             \
-  __sanitizer_syscall_pre_impl_mkfifo((long long)(path), (long long)(mode))
-#define __sanitizer_syscall_post_mkfifo(res, path, mode)                       \
-  __sanitizer_syscall_post_impl_mkfifo(res, (long long)(path),                 \
-                                       (long long)(mode))
-#define __sanitizer_syscall_pre_sendto(s, buf, len, flags, to, tolen)          \
-  __sanitizer_syscall_pre_impl_sendto((long long)(s), (long long)(buf),        \
-                                      (long long)(len), (long long)(flags),    \
-                                      (long long)(to), (long long)(tolen))
-#define __sanitizer_syscall_post_sendto(res, s, buf, len, flags, to, tolen)    \
-  __sanitizer_syscall_post_impl_sendto(res, (long long)(s), (long long)(buf),  \
-                                       (long long)(len), (long long)(flags),   \
-                                       (long long)(to), (long long)(tolen))
-#define __sanitizer_syscall_pre_shutdown(s, how)                               \
-  __sanitizer_syscall_pre_impl_shutdown((long long)(s), (long long)(how))
-#define __sanitizer_syscall_post_shutdown(res, s, how)                         \
-  __sanitizer_syscall_post_impl_shutdown(res, (long long)(s), (long long)(how))
-#define __sanitizer_syscall_pre_socketpair(domain, type, protocol, rsv)        \
-  __sanitizer_syscall_pre_impl_socketpair(                                     \
-      (long long)(domain), (long long)(type), (long long)(protocol),           \
-      (long long)(rsv))
-#define __sanitizer_syscall_post_socketpair(res, domain, type, protocol, rsv)  \
-  __sanitizer_syscall_post_impl_socketpair(                                    \
-      res, (long long)(domain), (long long)(type), (long long)(protocol),      \
-      (long long)(rsv))
-#define __sanitizer_syscall_pre_mkdir(path, mode)                              \
-  __sanitizer_syscall_pre_impl_mkdir((long long)(path), (long long)(mode))
-#define __sanitizer_syscall_post_mkdir(res, path, mode)                        \
-  __sanitizer_syscall_post_impl_mkdir(res, (long long)(path), (long long)(mode))
-#define __sanitizer_syscall_pre_rmdir(path)                                    \
-  __sanitizer_syscall_pre_impl_rmdir((long long)(path))
-#define __sanitizer_syscall_post_rmdir(res, path)                              \
-  __sanitizer_syscall_post_impl_rmdir(res, (long long)(path))
-#define __sanitizer_syscall_pre_compat_50_utimes(path, tptr)                   \
-  __sanitizer_syscall_pre_impl_compat_50_utimes((long long)(path),             \
-                                                (long long)(tptr))
-#define __sanitizer_syscall_post_compat_50_utimes(res, path, tptr)             \
-  __sanitizer_syscall_post_impl_compat_50_utimes(res, (long long)(path),       \
-                                                 (long long)(tptr))
-/* syscall 139 has been skipped */
-#define __sanitizer_syscall_pre_compat_50_adjtime(delta, olddelta)             \
-  __sanitizer_syscall_pre_impl_compat_50_adjtime((long long)(delta),           \
-                                                 (long long)(olddelta))
-#define __sanitizer_syscall_post_compat_50_adjtime(res, delta, olddelta)       \
-  __sanitizer_syscall_post_impl_compat_50_adjtime(res, (long long)(delta),     \
-                                                  (long long)(olddelta))
-#define __sanitizer_syscall_pre_compat_43_ogetpeername(fdes, asa, alen)        \
-  __sanitizer_syscall_pre_impl_compat_43_ogetpeername(                         \
-      (long long)(fdes), (long long)(asa), (long long)(alen))
-#define __sanitizer_syscall_post_compat_43_ogetpeername(res, fdes, asa, alen)  \
-  __sanitizer_syscall_post_impl_compat_43_ogetpeername(                        \
-      res, (long long)(fdes), (long long)(asa), (long long)(alen))
-#define __sanitizer_syscall_pre_compat_43_ogethostid()                         \
-  __sanitizer_syscall_pre_impl_compat_43_ogethostid()
-#define __sanitizer_syscall_post_compat_43_ogethostid(res)                     \
-  __sanitizer_syscall_post_impl_compat_43_ogethostid(res)
-#define __sanitizer_syscall_pre_compat_43_osethostid(hostid)                   \
-  __sanitizer_syscall_pre_impl_compat_43_osethostid((long long)(hostid))
-#define __sanitizer_syscall_post_compat_43_osethostid(res, hostid)             \
-  __sanitizer_syscall_post_impl_compat_43_osethostid(res, (long long)(hostid))
-#define __sanitizer_syscall_pre_compat_43_ogetrlimit(which, rlp)               \
-  __sanitizer_syscall_pre_impl_compat_43_ogetrlimit((long long)(which),        \
-                                                    (long long)(rlp))
-#define __sanitizer_syscall_post_compat_43_ogetrlimit(res, which, rlp)         \
-  __sanitizer_syscall_post_impl_compat_43_ogetrlimit(res, (long long)(which),  \
-                                                     (long long)(rlp))
-#define __sanitizer_syscall_pre_compat_43_osetrlimit(which, rlp)               \
-  __sanitizer_syscall_pre_impl_compat_43_osetrlimit((long long)(which),        \
-                                                    (long long)(rlp))
-#define __sanitizer_syscall_post_compat_43_osetrlimit(res, which, rlp)         \
-  __sanitizer_syscall_post_impl_compat_43_osetrlimit(res, (long long)(which),  \
-                                                     (long long)(rlp))
-#define __sanitizer_syscall_pre_compat_43_okillpg(pgid, signum)                \
-  __sanitizer_syscall_pre_impl_compat_43_okillpg((long long)(pgid),            \
-                                                 (long long)(signum))
-#define __sanitizer_syscall_post_compat_43_okillpg(res, pgid, signum)          \
-  __sanitizer_syscall_post_impl_compat_43_okillpg(res, (long long)(pgid),      \
-                                                  (long long)(signum))
-#define __sanitizer_syscall_pre_setsid() __sanitizer_syscall_pre_impl_setsid()
-#define __sanitizer_syscall_post_setsid(res)                                   \
-  __sanitizer_syscall_post_impl_setsid(res)
-#define __sanitizer_syscall_pre_compat_50_quotactl(path, cmd, uid, arg)        \
-  __sanitizer_syscall_pre_impl_compat_50_quotactl(                             \
-      (long long)(path), (long long)(cmd), (long long)(uid), (long long)(arg))
-#define __sanitizer_syscall_post_compat_50_quotactl(res, path, cmd, uid, arg)  \
-  __sanitizer_syscall_post_impl_compat_50_quotactl(                            \
-      res, (long long)(path), (long long)(cmd), (long long)(uid),              \
-      (long long)(arg))
-#define __sanitizer_syscall_pre_compat_43_oquota()                             \
-  __sanitizer_syscall_pre_impl_compat_43_oquota()
-#define __sanitizer_syscall_post_compat_43_oquota(res)                         \
-  __sanitizer_syscall_post_impl_compat_43_oquota(res)
-#define __sanitizer_syscall_pre_compat_43_ogetsockname(fdec, asa, alen)        \
-  __sanitizer_syscall_pre_impl_compat_43_ogetsockname(                         \
-      (long long)(fdec), (long long)(asa), (long long)(alen))
-#define __sanitizer_syscall_post_compat_43_ogetsockname(res, fdec, asa, alen)  \
-  __sanitizer_syscall_post_impl_compat_43_ogetsockname(                        \
-      res, (long long)(fdec), (long long)(asa), (long long)(alen))
-/* syscall 151 has been skipped */
-/* syscall 152 has been skipped */
-/* syscall 153 has been skipped */
-/* syscall 154 has been skipped */
-#define __sanitizer_syscall_pre_nfssvc(flag, argp)                             \
-  __sanitizer_syscall_pre_impl_nfssvc((long long)(flag), (long long)(argp))
-#define __sanitizer_syscall_post_nfssvc(res, flag, argp)                       \
-  __sanitizer_syscall_post_impl_nfssvc(res, (long long)(flag),                 \
-                                       (long long)(argp))
-#define __sanitizer_syscall_pre_compat_43_ogetdirentries(fd, buf, count,       \
-                                                         basep)                \
-  __sanitizer_syscall_pre_impl_compat_43_ogetdirentries(                       \
-      (long long)(fd), (long long)(buf), (long long)(count),                   \
-      (long long)(basep))
-#define __sanitizer_syscall_post_compat_43_ogetdirentries(res, fd, buf, count, \
-                                                          basep)               \
-  __sanitizer_syscall_post_impl_compat_43_ogetdirentries(                      \
-      res, (long long)(fd), (long long)(buf), (long long)(count),              \
-      (long long)(basep))
-#define __sanitizer_syscall_pre_compat_20_statfs(path, buf)                    \
-  __sanitizer_syscall_pre_impl_compat_20_statfs((long long)(path),             \
-                                                (long long)(buf))
-#define __sanitizer_syscall_post_compat_20_statfs(res, path, buf)              \
-  __sanitizer_syscall_post_impl_compat_20_statfs(res, (long long)(path),       \
-                                                 (long long)(buf))
-#define __sanitizer_syscall_pre_compat_20_fstatfs(fd, buf)                     \
-  __sanitizer_syscall_pre_impl_compat_20_fstatfs((long long)(fd),              \
-                                                 (long long)(buf))
-#define __sanitizer_syscall_post_compat_20_fstatfs(res, fd, buf)               \
-  __sanitizer_syscall_post_impl_compat_20_fstatfs(res, (long long)(fd),        \
-                                                  (long long)(buf))
-/* syscall 159 has been skipped */
-/* syscall 160 has been skipped */
-#define __sanitizer_syscall_pre_compat_30_getfh(fname, fhp)                    \
-  __sanitizer_syscall_pre_impl_compat_30_getfh((long long)(fname),             \
-                                               (long long)(fhp))
-#define __sanitizer_syscall_post_compat_30_getfh(res, fname, fhp)              \
-  __sanitizer_syscall_post_impl_compat_30_getfh(res, (long long)(fname),       \
-                                                (long long)(fhp))
-#define __sanitizer_syscall_pre_compat_09_ogetdomainname(domainname, len)      \
-  __sanitizer_syscall_pre_impl_compat_09_ogetdomainname(                       \
-      (long long)(domainname), (long long)(len))
-#define __sanitizer_syscall_post_compat_09_ogetdomainname(res, domainname,     \
-                                                          len)                 \
-  __sanitizer_syscall_post_impl_compat_09_ogetdomainname(                      \
-      res, (long long)(domainname), (long long)(len))
-#define __sanitizer_syscall_pre_compat_09_osetdomainname(domainname, len)      \
-  __sanitizer_syscall_pre_impl_compat_09_osetdomainname(                       \
-      (long long)(domainname), (long long)(len))
-#define __sanitizer_syscall_post_compat_09_osetdomainname(res, domainname,     \
-                                                          len)                 \
-  __sanitizer_syscall_post_impl_compat_09_osetdomainname(                      \
-      res, (long long)(domainname), (long long)(len))
-#define __sanitizer_syscall_pre_compat_09_ouname(name)                         \
-  __sanitizer_syscall_pre_impl_compat_09_ouname((long long)(name))
-#define __sanitizer_syscall_post_compat_09_ouname(res, name)                   \
-  __sanitizer_syscall_post_impl_compat_09_ouname(res, (long long)(name))
-#define __sanitizer_syscall_pre_sysarch(op, parms)                             \
-  __sanitizer_syscall_pre_impl_sysarch((long long)(op), (long long)(parms))
-#define __sanitizer_syscall_post_sysarch(res, op, parms)                       \
-  __sanitizer_syscall_post_impl_sysarch(res, (long long)(op),                  \
-                                        (long long)(parms))
-/* syscall 166 has been skipped */
-/* syscall 167 has been skipped */
-/* syscall 168 has been skipped */
-#if !defined(_LP64)
-#define __sanitizer_syscall_pre_compat_10_osemsys(which, a2, a3, a4, a5)       \
-  __sanitizer_syscall_pre_impl_compat_10_osemsys(                              \
-      (long long)(which), (long long)(a2), (long long)(a3), (long long)(a4),   \
-      (long long)(a5))
-#define __sanitizer_syscall_post_compat_10_osemsys(res, which, a2, a3, a4, a5) \
-  __sanitizer_syscall_post_impl_compat_10_osemsys(                             \
-      res, (long long)(which), (long long)(a2), (long long)(a3),               \
-      (long long)(a4), (long long)(a5))
-#else
-/* syscall 169 has been skipped */
-#endif
-#if !defined(_LP64)
-#define __sanitizer_syscall_pre_compat_10_omsgsys(which, a2, a3, a4, a5, a6)   \
-  __sanitizer_syscall_pre_impl_compat_10_omsgsys(                              \
-      (long long)(which), (long long)(a2), (long long)(a3), (long long)(a4),   \
-      (long long)(a5), (long long)(a6))
-#define __sanitizer_syscall_post_compat_10_omsgsys(res, which, a2, a3, a4, a5, \
-                                                   a6)                         \
-  __sanitizer_syscall_post_impl_compat_10_omsgsys(                             \
-      res, (long long)(which), (long long)(a2), (long long)(a3),               \
-      (long long)(a4), (long long)(a5), (long long)(a6))
-#else
-/* syscall 170 has been skipped */
-#endif
-#if !defined(_LP64)
-#define __sanitizer_syscall_pre_compat_10_oshmsys(which, a2, a3, a4)           \
-  __sanitizer_syscall_pre_impl_compat_10_oshmsys(                              \
-      (long long)(which), (long long)(a2), (long long)(a3), (long long)(a4))
-#define __sanitizer_syscall_post_compat_10_oshmsys(res, which, a2, a3, a4)     \
-  __sanitizer_syscall_post_impl_compat_10_oshmsys(                             \
-      res, (long long)(which), (long long)(a2), (long long)(a3),               \
-      (long long)(a4))
-#else
-/* syscall 171 has been skipped */
-#endif
-/* syscall 172 has been skipped */
-#define __sanitizer_syscall_pre_pread(fd, buf, nbyte, PAD, offset)             \
-  __sanitizer_syscall_pre_impl_pread((long long)(fd), (long long)(buf),        \
-                                     (long long)(nbyte), (long long)(PAD),     \
-                                     (long long)(offset))
-#define __sanitizer_syscall_post_pread(res, fd, buf, nbyte, PAD, offset)       \
-  __sanitizer_syscall_post_impl_pread(res, (long long)(fd), (long long)(buf),  \
-                                      (long long)(nbyte), (long long)(PAD),    \
-                                      (long long)(offset))
-#define __sanitizer_syscall_pre_pwrite(fd, buf, nbyte, PAD, offset)            \
-  __sanitizer_syscall_pre_impl_pwrite((long long)(fd), (long long)(buf),       \
-                                      (long long)(nbyte), (long long)(PAD),    \
-                                      (long long)(offset))
-#define __sanitizer_syscall_post_pwrite(res, fd, buf, nbyte, PAD, offset)      \
-  __sanitizer_syscall_post_impl_pwrite(res, (long long)(fd), (long long)(buf), \
-                                       (long long)(nbyte), (long long)(PAD),   \
-                                       (long long)(offset))
-#define __sanitizer_syscall_pre_compat_30_ntp_gettime(ntvp)                    \
-  __sanitizer_syscall_pre_impl_compat_30_ntp_gettime((long long)(ntvp))
-#define __sanitizer_syscall_post_compat_30_ntp_gettime(res, ntvp)              \
-  __sanitizer_syscall_post_impl_compat_30_ntp_gettime(res, (long long)(ntvp))
-#if defined(NTP) || !defined(_KERNEL_OPT)
-#define __sanitizer_syscall_pre_ntp_adjtime(tp)                                \
-  __sanitizer_syscall_pre_impl_ntp_adjtime((long long)(tp))
-#define __sanitizer_syscall_post_ntp_adjtime(res, tp)                          \
-  __sanitizer_syscall_post_impl_ntp_adjtime(res, (long long)(tp))
-#else
-/* syscall 176 has been skipped */
-#endif
-/* syscall 177 has been skipped */
-/* syscall 178 has been skipped */
-/* syscall 179 has been skipped */
-/* syscall 180 has been skipped */
-#define __sanitizer_syscall_pre_setgid(gid)                                    \
-  __sanitizer_syscall_pre_impl_setgid((long long)(gid))
-#define __sanitizer_syscall_post_setgid(res, gid)                              \
-  __sanitizer_syscall_post_impl_setgid(res, (long long)(gid))
-#define __sanitizer_syscall_pre_setegid(egid)                                  \
-  __sanitizer_syscall_pre_impl_setegid((long long)(egid))
-#define __sanitizer_syscall_post_setegid(res, egid)                            \
-  __sanitizer_syscall_post_impl_setegid(res, (long long)(egid))
-#define __sanitizer_syscall_pre_seteuid(euid)                                  \
-  __sanitizer_syscall_pre_impl_seteuid((long long)(euid))
-#define __sanitizer_syscall_post_seteuid(res, euid)                            \
-  __sanitizer_syscall_post_impl_seteuid(res, (long long)(euid))
-#define __sanitizer_syscall_pre_lfs_bmapv(fsidp, blkiov, blkcnt)               \
-  __sanitizer_syscall_pre_impl_lfs_bmapv(                                      \
-      (long long)(fsidp), (long long)(blkiov), (long long)(blkcnt))
-#define __sanitizer_syscall_post_lfs_bmapv(res, fsidp, blkiov, blkcnt)         \
-  __sanitizer_syscall_post_impl_lfs_bmapv(                                     \
-      res, (long long)(fsidp), (long long)(blkiov), (long long)(blkcnt))
-#define __sanitizer_syscall_pre_lfs_markv(fsidp, blkiov, blkcnt)               \
-  __sanitizer_syscall_pre_impl_lfs_markv(                                      \
-      (long long)(fsidp), (long long)(blkiov), (long long)(blkcnt))
-#define __sanitizer_syscall_post_lfs_markv(res, fsidp, blkiov, blkcnt)         \
-  __sanitizer_syscall_post_impl_lfs_markv(                                     \
-      res, (long long)(fsidp), (long long)(blkiov), (long long)(blkcnt))
-#define __sanitizer_syscall_pre_lfs_segclean(fsidp, segment)                   \
-  __sanitizer_syscall_pre_impl_lfs_segclean((long long)(fsidp),                \
-                                            (long long)(segment))
-#define __sanitizer_syscall_post_lfs_segclean(res, fsidp, segment)             \
-  __sanitizer_syscall_post_impl_lfs_segclean(res, (long long)(fsidp),          \
-                                             (long long)(segment))
-#define __sanitizer_syscall_pre_compat_50_lfs_segwait(fsidp, tv)               \
-  __sanitizer_syscall_pre_impl_compat_50_lfs_segwait((long long)(fsidp),       \
-                                                     (long long)(tv))
-#define __sanitizer_syscall_post_compat_50_lfs_segwait(res, fsidp, tv)         \
-  __sanitizer_syscall_post_impl_compat_50_lfs_segwait(res, (long long)(fsidp), \
-                                                      (long long)(tv))
-#define __sanitizer_syscall_pre_compat_12_stat12(path, ub)                     \
-  __sanitizer_syscall_pre_impl_compat_12_stat12((long long)(path),             \
-                                                (long long)(ub))
-#define __sanitizer_syscall_post_compat_12_stat12(res, path, ub)               \
-  __sanitizer_syscall_post_impl_compat_12_stat12(res, (long long)(path),       \
-                                                 (long long)(ub))
-#define __sanitizer_syscall_pre_compat_12_fstat12(fd, sb)                      \
-  __sanitizer_syscall_pre_impl_compat_12_fstat12((long long)(fd),              \
-                                                 (long long)(sb))
-#define __sanitizer_syscall_post_compat_12_fstat12(res, fd, sb)                \
-  __sanitizer_syscall_post_impl_compat_12_fstat12(res, (long long)(fd),        \
-                                                  (long long)(sb))
-#define __sanitizer_syscall_pre_compat_12_lstat12(path, ub)                    \
-  __sanitizer_syscall_pre_impl_compat_12_lstat12((long long)(path),            \
-                                                 (long long)(ub))
-#define __sanitizer_syscall_post_compat_12_lstat12(res, path, ub)              \
-  __sanitizer_syscall_post_impl_compat_12_lstat12(res, (long long)(path),      \
-                                                  (long long)(ub))
-#define __sanitizer_syscall_pre_pathconf(path, name)                           \
-  __sanitizer_syscall_pre_impl_pathconf((long long)(path), (long long)(name))
-#define __sanitizer_syscall_post_pathconf(res, path, name)                     \
-  __sanitizer_syscall_post_impl_pathconf(res, (long long)(path),               \
-                                         (long long)(name))
-#define __sanitizer_syscall_pre_fpathconf(fd, name)                            \
-  __sanitizer_syscall_pre_impl_fpathconf((long long)(fd), (long long)(name))
-#define __sanitizer_syscall_post_fpathconf(res, fd, name)                      \
-  __sanitizer_syscall_post_impl_fpathconf(res, (long long)(fd),                \
-                                          (long long)(name))
-#define __sanitizer_syscall_pre_getsockopt2(s, level, name, val, avalsize)     \
-  __sanitizer_syscall_pre_impl_getsockopt2(                                    \
-      (long long)(s), (long long)(level), (long long)(name), (long long)(val), \
-      (long long)(avalsize))
-#define __sanitizer_syscall_post_getsockopt2(res, s, level, name, val,         \
-                                             avalsize)                         \
-  __sanitizer_syscall_post_impl_getsockopt2(                                   \
-      res, (long long)(s), (long long)(level), (long long)(name),              \
-      (long long)(val), (long long)(avalsize))
-#define __sanitizer_syscall_pre_getrlimit(which, rlp)                          \
-  __sanitizer_syscall_pre_impl_getrlimit((long long)(which), (long long)(rlp))
-#define __sanitizer_syscall_post_getrlimit(res, which, rlp)                    \
-  __sanitizer_syscall_post_impl_getrlimit(res, (long long)(which),             \
-                                          (long long)(rlp))
-#define __sanitizer_syscall_pre_setrlimit(which, rlp)                          \
-  __sanitizer_syscall_pre_impl_setrlimit((long long)(which), (long long)(rlp))
-#define __sanitizer_syscall_post_setrlimit(res, which, rlp)                    \
-  __sanitizer_syscall_post_impl_setrlimit(res, (long long)(which),             \
-                                          (long long)(rlp))
-#define __sanitizer_syscall_pre_compat_12_getdirentries(fd, buf, count, basep) \
-  __sanitizer_syscall_pre_impl_compat_12_getdirentries(                        \
-      (long long)(fd), (long long)(buf), (long long)(count),                   \
-      (long long)(basep))
-#define __sanitizer_syscall_post_compat_12_getdirentries(res, fd, buf, count,  \
-                                                         basep)                \
-  __sanitizer_syscall_post_impl_compat_12_getdirentries(                       \
-      res, (long long)(fd), (long long)(buf), (long long)(count),              \
-      (long long)(basep))
-#define __sanitizer_syscall_pre_mmap(addr, len, prot, flags, fd, PAD, pos)     \
-  __sanitizer_syscall_pre_impl_mmap(                                           \
-      (long long)(addr), (long long)(len), (long long)(prot),                  \
-      (long long)(flags), (long long)(fd), (long long)(PAD), (long long)(pos))
-#define __sanitizer_syscall_post_mmap(res, addr, len, prot, flags, fd, PAD,    \
-                                      pos)                                     \
-  __sanitizer_syscall_post_impl_mmap(                                          \
-      res, (long long)(addr), (long long)(len), (long long)(prot),             \
-      (long long)(flags), (long long)(fd), (long long)(PAD), (long long)(pos))
-#define __sanitizer_syscall_pre___syscall(code, arg0, arg1, arg2, arg3, arg4,  \
-                                          arg5, arg6, arg7)                    \
-  __sanitizer_syscall_pre_impl___syscall(                                      \
-      (long long)(code), (long long)(arg0), (long long)(arg1),                 \
-      (long long)(arg2), (long long)(arg3), (long long)(arg4),                 \
-      (long long)(arg5), (long long)(arg6), (long long)(arg7))
-#define __sanitizer_syscall_post___syscall(res, code, arg0, arg1, arg2, arg3,  \
-                                           arg4, arg5, arg6, arg7)             \
-  __sanitizer_syscall_post_impl___syscall(                                     \
-      res, (long long)(code), (long long)(arg0), (long long)(arg1),            \
-      (long long)(arg2), (long long)(arg3), (long long)(arg4),                 \
-      (long long)(arg5), (long long)(arg6), (long long)(arg7))
-#define __sanitizer_syscall_pre_lseek(fd, PAD, offset, whence)                 \
-  __sanitizer_syscall_pre_impl_lseek((long long)(fd), (long long)(PAD),        \
-                                     (long long)(offset), (long long)(whence))
-#define __sanitizer_syscall_post_lseek(res, fd, PAD, offset, whence)           \
-  __sanitizer_syscall_post_impl_lseek(res, (long long)(fd), (long long)(PAD),  \
-                                      (long long)(offset),                     \
-                                      (long long)(whence))
-#define __sanitizer_syscall_pre_truncate(path, PAD, length)                    \
-  __sanitizer_syscall_pre_impl_truncate((long long)(path), (long long)(PAD),   \
-                                        (long long)(length))
-#define __sanitizer_syscall_post_truncate(res, path, PAD, length)              \
-  __sanitizer_syscall_post_impl_truncate(                                      \
-      res, (long long)(path), (long long)(PAD), (long long)(length))
-#define __sanitizer_syscall_pre_ftruncate(fd, PAD, length)                     \
-  __sanitizer_syscall_pre_impl_ftruncate((long long)(fd), (long long)(PAD),    \
-                                         (long long)(length))
-#define __sanitizer_syscall_post_ftruncate(res, fd, PAD, length)               \
-  __sanitizer_syscall_post_impl_ftruncate(                                     \
-      res, (long long)(fd), (long long)(PAD), (long long)(length))
-#define __sanitizer_syscall_pre___sysctl(name, namelen, oldv, oldlenp, newv,   \
-                                         newlen)                               \
-  __sanitizer_syscall_pre_impl___sysctl(                                       \
-      (long long)(name), (long long)(namelen), (long long)(oldv),              \
-      (long long)(oldlenp), (long long)(newv), (long long)(newlen))
-#define __sanitizer_syscall_post___sysctl(res, name, namelen, oldv, oldlenp,   \
-                                          newv, newlen)                        \
-  __sanitizer_syscall_post_impl___sysctl(                                      \
-      res, (long long)(name), (long long)(namelen), (long long)(oldv),         \
-      (long long)(oldlenp), (long long)(newv), (long long)(newlen))
-#define __sanitizer_syscall_pre_mlock(addr, len)                               \
-  __sanitizer_syscall_pre_impl_mlock((long long)(addr), (long long)(len))
-#define __sanitizer_syscall_post_mlock(res, addr, len)                         \
-  __sanitizer_syscall_post_impl_mlock(res, (long long)(addr), (long long)(len))
-#define __sanitizer_syscall_pre_munlock(addr, len)                             \
-  __sanitizer_syscall_pre_impl_munlock((long long)(addr), (long long)(len))
-#define __sanitizer_syscall_post_munlock(res, addr, len)                       \
-  __sanitizer_syscall_post_impl_munlock(res, (long long)(addr),                \
-                                        (long long)(len))
-#define __sanitizer_syscall_pre_undelete(path)                                 \
-  __sanitizer_syscall_pre_impl_undelete((long long)(path))
-#define __sanitizer_syscall_post_undelete(res, path)                           \
-  __sanitizer_syscall_post_impl_undelete(res, (long long)(path))
-#define __sanitizer_syscall_pre_compat_50_futimes(fd, tptr)                    \
-  __sanitizer_syscall_pre_impl_compat_50_futimes((long long)(fd),              \
-                                                 (long long)(tptr))
-#define __sanitizer_syscall_post_compat_50_futimes(res, fd, tptr)              \
-  __sanitizer_syscall_post_impl_compat_50_futimes(res, (long long)(fd),        \
-                                                  (long long)(tptr))
-#define __sanitizer_syscall_pre_getpgid(pid)                                   \
-  __sanitizer_syscall_pre_impl_getpgid((long long)(pid))
-#define __sanitizer_syscall_post_getpgid(res, pid)                             \
-  __sanitizer_syscall_post_impl_getpgid(res, (long long)(pid))
-#define __sanitizer_syscall_pre_reboot(opt, bootstr)                           \
-  __sanitizer_syscall_pre_impl_reboot((long long)(opt), (long long)(bootstr))
-#define __sanitizer_syscall_post_reboot(res, opt, bootstr)                     \
-  __sanitizer_syscall_post_impl_reboot(res, (long long)(opt),                  \
-                                       (long long)(bootstr))
-#define __sanitizer_syscall_pre_poll(fds, nfds, timeout)                       \
-  __sanitizer_syscall_pre_impl_poll((long long)(fds), (long long)(nfds),       \
-                                    (long long)(timeout))
-#define __sanitizer_syscall_post_poll(res, fds, nfds, timeout)                 \
-  __sanitizer_syscall_post_impl_poll(res, (long long)(fds), (long long)(nfds), \
-                                     (long long)(timeout))
-#define __sanitizer_syscall_pre_afssys(id, a1, a2, a3, a4, a5, a6)             \
-  __sanitizer_syscall_pre_impl_afssys(                                         \
-      (long long)(id), (long long)(a1), (long long)(a2), (long long)(a3),      \
-      (long long)(a4), (long long)(a5), (long long)(a6))
-#define __sanitizer_syscall_post_afssys(res, id, a1, a2, a3, a4, a5, a6)       \
-  __sanitizer_syscall_post_impl_afssys(                                        \
-      res, (long long)(id), (long long)(a1), (long long)(a2), (long long)(a3), \
-      (long long)(a4), (long long)(a5), (long long)(a6))
-/* syscall 211 has been skipped */
-/* syscall 212 has been skipped */
-/* syscall 213 has been skipped */
-/* syscall 214 has been skipped */
-/* syscall 215 has been skipped */
-/* syscall 216 has been skipped */
-/* syscall 217 has been skipped */
-/* syscall 218 has been skipped */
-/* syscall 219 has been skipped */
-#define __sanitizer_syscall_pre_compat_14___semctl(semid, semnum, cmd, arg)    \
-  __sanitizer_syscall_pre_impl_compat_14___semctl(                             \
-      (long long)(semid), (long long)(semnum), (long long)(cmd),               \
-      (long long)(arg))
-#define __sanitizer_syscall_post_compat_14___semctl(res, semid, semnum, cmd,   \
-                                                    arg)                       \
-  __sanitizer_syscall_post_impl_compat_14___semctl(                            \
-      res, (long long)(semid), (long long)(semnum), (long long)(cmd),          \
-      (long long)(arg))
-#define __sanitizer_syscall_pre_semget(key, nsems, semflg)                     \
-  __sanitizer_syscall_pre_impl_semget((long long)(key), (long long)(nsems),    \
-                                      (long long)(semflg))
-#define __sanitizer_syscall_post_semget(res, key, nsems, semflg)               \
-  __sanitizer_syscall_post_impl_semget(                                        \
-      res, (long long)(key), (long long)(nsems), (long long)(semflg))
-#define __sanitizer_syscall_pre_semop(semid, sops, nsops)                      \
-  __sanitizer_syscall_pre_impl_semop((long long)(semid), (long long)(sops),    \
-                                     (long long)(nsops))
-#define __sanitizer_syscall_post_semop(res, semid, sops, nsops)                \
-  __sanitizer_syscall_post_impl_semop(res, (long long)(semid),                 \
-                                      (long long)(sops), (long long)(nsops))
-#define __sanitizer_syscall_pre_semconfig(flag)                                \
-  __sanitizer_syscall_pre_impl_semconfig((long long)(flag))
-#define __sanitizer_syscall_post_semconfig(res, flag)                          \
-  __sanitizer_syscall_post_impl_semconfig(res, (long long)(flag))
-#define __sanitizer_syscall_pre_compat_14_msgctl(msqid, cmd, buf)              \
-  __sanitizer_syscall_pre_impl_compat_14_msgctl(                               \
-      (long long)(msqid), (long long)(cmd), (long long)(buf))
-#define __sanitizer_syscall_post_compat_14_msgctl(res, msqid, cmd, buf)        \
-  __sanitizer_syscall_post_impl_compat_14_msgctl(                              \
-      res, (long long)(msqid), (long long)(cmd), (long long)(buf))
-#define __sanitizer_syscall_pre_msgget(key, msgflg)                            \
-  __sanitizer_syscall_pre_impl_msgget((long long)(key), (long long)(msgflg))
-#define __sanitizer_syscall_post_msgget(res, key, msgflg)                      \
-  __sanitizer_syscall_post_impl_msgget(res, (long long)(key),                  \
-                                       (long long)(msgflg))
-#define __sanitizer_syscall_pre_msgsnd(msqid, msgp, msgsz, msgflg)             \
-  __sanitizer_syscall_pre_impl_msgsnd((long long)(msqid), (long long)(msgp),   \
-                                      (long long)(msgsz), (long long)(msgflg))
-#define __sanitizer_syscall_post_msgsnd(res, msqid, msgp, msgsz, msgflg)       \
-  __sanitizer_syscall_post_impl_msgsnd(res, (long long)(msqid),                \
-                                       (long long)(msgp), (long long)(msgsz),  \
-                                       (long long)(msgflg))
-#define __sanitizer_syscall_pre_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg)     \
-  __sanitizer_syscall_pre_impl_msgrcv((long long)(msqid), (long long)(msgp),   \
-                                      (long long)(msgsz), (long long)(msgtyp), \
-                                      (long long)(msgflg))
-#define __sanitizer_syscall_post_msgrcv(res, msqid, msgp, msgsz, msgtyp,       \
-                                        msgflg)                                \
-  __sanitizer_syscall_post_impl_msgrcv(                                        \
-      res, (long long)(msqid), (long long)(msgp), (long long)(msgsz),          \
-      (long long)(msgtyp), (long long)(msgflg))
-#define __sanitizer_syscall_pre_shmat(shmid, shmaddr, shmflg)                  \
-  __sanitizer_syscall_pre_impl_shmat((long long)(shmid), (long long)(shmaddr), \
-                                     (long long)(shmflg))
-#define __sanitizer_syscall_post_shmat(res, shmid, shmaddr, shmflg)            \
-  __sanitizer_syscall_post_impl_shmat(                                         \
-      res, (long long)(shmid), (long long)(shmaddr), (long long)(shmflg))
-#define __sanitizer_syscall_pre_compat_14_shmctl(shmid, cmd, buf)              \
-  __sanitizer_syscall_pre_impl_compat_14_shmctl(                               \
-      (long long)(shmid), (long long)(cmd), (long long)(buf))
-#define __sanitizer_syscall_post_compat_14_shmctl(res, shmid, cmd, buf)        \
-  __sanitizer_syscall_post_impl_compat_14_shmctl(                              \
-      res, (long long)(shmid), (long long)(cmd), (long long)(buf))
-#define __sanitizer_syscall_pre_shmdt(shmaddr)                                 \
-  __sanitizer_syscall_pre_impl_shmdt((long long)(shmaddr))
-#define __sanitizer_syscall_post_shmdt(res, shmaddr)                           \
-  __sanitizer_syscall_post_impl_shmdt(res, (long long)(shmaddr))
-#define __sanitizer_syscall_pre_shmget(key, size, shmflg)                      \
-  __sanitizer_syscall_pre_impl_shmget((long long)(key), (long long)(size),     \
-                                      (long long)(shmflg))
-#define __sanitizer_syscall_post_shmget(res, key, size, shmflg)                \
-  __sanitizer_syscall_post_impl_shmget(res, (long long)(key),                  \
-                                       (long long)(size), (long long)(shmflg))
-#define __sanitizer_syscall_pre_compat_50_clock_gettime(clock_id, tp)          \
-  __sanitizer_syscall_pre_impl_compat_50_clock_gettime((long long)(clock_id),  \
-                                                       (long long)(tp))
-#define __sanitizer_syscall_post_compat_50_clock_gettime(res, clock_id, tp)    \
-  __sanitizer_syscall_post_impl_compat_50_clock_gettime(                       \
-      res, (long long)(clock_id), (long long)(tp))
-#define __sanitizer_syscall_pre_compat_50_clock_settime(clock_id, tp)          \
-  __sanitizer_syscall_pre_impl_compat_50_clock_settime((long long)(clock_id),  \
-                                                       (long long)(tp))
-#define __sanitizer_syscall_post_compat_50_clock_settime(res, clock_id, tp)    \
-  __sanitizer_syscall_post_impl_compat_50_clock_settime(                       \
-      res, (long long)(clock_id), (long long)(tp))
-#define __sanitizer_syscall_pre_compat_50_clock_getres(clock_id, tp)           \
-  __sanitizer_syscall_pre_impl_compat_50_clock_getres((long long)(clock_id),   \
-                                                      (long long)(tp))
-#define __sanitizer_syscall_post_compat_50_clock_getres(res, clock_id, tp)     \
-  __sanitizer_syscall_post_impl_compat_50_clock_getres(                        \
-      res, (long long)(clock_id), (long long)(tp))
-#define __sanitizer_syscall_pre_timer_create(clock_id, evp, timerid)           \
-  __sanitizer_syscall_pre_impl_timer_create(                                   \
-      (long long)(clock_id), (long long)(evp), (long long)(timerid))
-#define __sanitizer_syscall_post_timer_create(res, clock_id, evp, timerid)     \
-  __sanitizer_syscall_post_impl_timer_create(                                  \
-      res, (long long)(clock_id), (long long)(evp), (long long)(timerid))
-#define __sanitizer_syscall_pre_timer_delete(timerid)                          \
-  __sanitizer_syscall_pre_impl_timer_delete((long long)(timerid))
-#define __sanitizer_syscall_post_timer_delete(res, timerid)                    \
-  __sanitizer_syscall_post_impl_timer_delete(res, (long long)(timerid))
-#define __sanitizer_syscall_pre_compat_50_timer_settime(timerid, flags, value, \
-                                                        ovalue)                \
-  __sanitizer_syscall_pre_impl_compat_50_timer_settime(                        \
-      (long long)(timerid), (long long)(flags), (long long)(value),            \
-      (long long)(ovalue))
-#define __sanitizer_syscall_post_compat_50_timer_settime(res, timerid, flags,  \
-                                                         value, ovalue)        \
-  __sanitizer_syscall_post_impl_compat_50_timer_settime(                       \
-      res, (long long)(timerid), (long long)(flags), (long long)(value),       \
-      (long long)(ovalue))
-#define __sanitizer_syscall_pre_compat_50_timer_gettime(timerid, value)        \
-  __sanitizer_syscall_pre_impl_compat_50_timer_gettime((long long)(timerid),   \
-                                                       (long long)(value))
-#define __sanitizer_syscall_post_compat_50_timer_gettime(res, timerid, value)  \
-  __sanitizer_syscall_post_impl_compat_50_timer_gettime(                       \
-      res, (long long)(timerid), (long long)(value))
-#define __sanitizer_syscall_pre_timer_getoverrun(timerid)                      \
-  __sanitizer_syscall_pre_impl_timer_getoverrun((long long)(timerid))
-#define __sanitizer_syscall_post_timer_getoverrun(res, timerid)                \
-  __sanitizer_syscall_post_impl_timer_getoverrun(res, (long long)(timerid))
-#define __sanitizer_syscall_pre_compat_50_nanosleep(rqtp, rmtp)                \
-  __sanitizer_syscall_pre_impl_compat_50_nanosleep((long long)(rqtp),          \
-                                                   (long long)(rmtp))
-#define __sanitizer_syscall_post_compat_50_nanosleep(res, rqtp, rmtp)          \
-  __sanitizer_syscall_post_impl_compat_50_nanosleep(res, (long long)(rqtp),    \
-                                                    (long long)(rmtp))
-#define __sanitizer_syscall_pre_fdatasync(fd)                                  \
-  __sanitizer_syscall_pre_impl_fdatasync((long long)(fd))
-#define __sanitizer_syscall_post_fdatasync(res, fd)                            \
-  __sanitizer_syscall_post_impl_fdatasync(res, (long long)(fd))
-#define __sanitizer_syscall_pre_mlockall(flags)                                \
-  __sanitizer_syscall_pre_impl_mlockall((long long)(flags))
-#define __sanitizer_syscall_post_mlockall(res, flags)                          \
-  __sanitizer_syscall_post_impl_mlockall(res, (long long)(flags))
-#define __sanitizer_syscall_pre_munlockall()                                   \
-  __sanitizer_syscall_pre_impl_munlockall()
-#define __sanitizer_syscall_post_munlockall(res)                               \
-  __sanitizer_syscall_post_impl_munlockall(res)
-#define __sanitizer_syscall_pre_compat_50___sigtimedwait(set, info, timeout)   \
-  __sanitizer_syscall_pre_impl_compat_50___sigtimedwait(                       \
-      (long long)(set), (long long)(info), (long long)(timeout))
-#define __sanitizer_syscall_post_compat_50___sigtimedwait(res, set, info,      \
-                                                          timeout)             \
-  __sanitizer_syscall_post_impl_compat_50___sigtimedwait(                      \
-      res, (long long)(set), (long long)(info), (long long)(timeout))
-#define __sanitizer_syscall_pre_sigqueueinfo(pid, info)                        \
-  __sanitizer_syscall_pre_impl_sigqueueinfo((long long)(pid), (long long)(info))
-#define __sanitizer_syscall_post_sigqueueinfo(res, pid, info)                  \
-  __sanitizer_syscall_post_impl_sigqueueinfo(res, (long long)(pid),            \
-                                             (long long)(info))
-#define __sanitizer_syscall_pre_modctl(cmd, arg)                               \
-  __sanitizer_syscall_pre_impl_modctl((long long)(cmd), (long long)(arg))
-#define __sanitizer_syscall_post_modctl(res, cmd, arg)                         \
-  __sanitizer_syscall_post_impl_modctl(res, (long long)(cmd), (long long)(arg))
-#define __sanitizer_syscall_pre__ksem_init(value, idp)                         \
-  __sanitizer_syscall_pre_impl__ksem_init((long long)(value), (long long)(idp))
-#define __sanitizer_syscall_post__ksem_init(res, value, idp)                   \
-  __sanitizer_syscall_post_impl__ksem_init(res, (long long)(value),            \
-                                           (long long)(idp))
-#define __sanitizer_syscall_pre__ksem_open(name, oflag, mode, value, idp)      \
-  __sanitizer_syscall_pre_impl__ksem_open(                                     \
-      (long long)(name), (long long)(oflag), (long long)(mode),                \
-      (long long)(value), (long long)(idp))
-#define __sanitizer_syscall_post__ksem_open(res, name, oflag, mode, value,     \
-                                            idp)                               \
-  __sanitizer_syscall_post_impl__ksem_open(                                    \
-      res, (long long)(name), (long long)(oflag), (long long)(mode),           \
-      (long long)(value), (long long)(idp))
-#define __sanitizer_syscall_pre__ksem_unlink(name)                             \
-  __sanitizer_syscall_pre_impl__ksem_unlink((long long)(name))
-#define __sanitizer_syscall_post__ksem_unlink(res, name)                       \
-  __sanitizer_syscall_post_impl__ksem_unlink(res, (long long)(name))
-#define __sanitizer_syscall_pre__ksem_close(id)                                \
-  __sanitizer_syscall_pre_impl__ksem_close((long long)(id))
-#define __sanitizer_syscall_post__ksem_close(res, id)                          \
-  __sanitizer_syscall_post_impl__ksem_close(res, (long long)(id))
-#define __sanitizer_syscall_pre__ksem_post(id)                                 \
-  __sanitizer_syscall_pre_impl__ksem_post((long long)(id))
-#define __sanitizer_syscall_post__ksem_post(res, id)                           \
-  __sanitizer_syscall_post_impl__ksem_post(res, (long long)(id))
-#define __sanitizer_syscall_pre__ksem_wait(id)                                 \
-  __sanitizer_syscall_pre_impl__ksem_wait((long long)(id))
-#define __sanitizer_syscall_post__ksem_wait(res, id)                           \
-  __sanitizer_syscall_post_impl__ksem_wait(res, (long long)(id))
-#define __sanitizer_syscall_pre__ksem_trywait(id)                              \
-  __sanitizer_syscall_pre_impl__ksem_trywait((long long)(id))
-#define __sanitizer_syscall_post__ksem_trywait(res, id)                        \
-  __sanitizer_syscall_post_impl__ksem_trywait(res, (long long)(id))
-#define __sanitizer_syscall_pre__ksem_getvalue(id, value)                      \
-  __sanitizer_syscall_pre_impl__ksem_getvalue((long long)(id),                 \
-                                              (long long)(value))
-#define __sanitizer_syscall_post__ksem_getvalue(res, id, value)                \
-  __sanitizer_syscall_post_impl__ksem_getvalue(res, (long long)(id),           \
-                                               (long long)(value))
-#define __sanitizer_syscall_pre__ksem_destroy(id)                              \
-  __sanitizer_syscall_pre_impl__ksem_destroy((long long)(id))
-#define __sanitizer_syscall_post__ksem_destroy(res, id)                        \
-  __sanitizer_syscall_post_impl__ksem_destroy(res, (long long)(id))
-#define __sanitizer_syscall_pre__ksem_timedwait(id, abstime)                   \
-  __sanitizer_syscall_pre_impl__ksem_timedwait((long long)(id),                \
-                                               (long long)(abstime))
-#define __sanitizer_syscall_post__ksem_timedwait(res, id, abstime)             \
-  __sanitizer_syscall_post_impl__ksem_timedwait(res, (long long)(id),          \
-                                                (long long)(abstime))
-#define __sanitizer_syscall_pre_mq_open(name, oflag, mode, attr)               \
-  __sanitizer_syscall_pre_impl_mq_open((long long)(name), (long long)(oflag),  \
-                                       (long long)(mode), (long long)(attr))
-#define __sanitizer_syscall_post_mq_open(res, name, oflag, mode, attr)         \
-  __sanitizer_syscall_post_impl_mq_open(res, (long long)(name),                \
-                                        (long long)(oflag), (long long)(mode), \
-                                        (long long)(attr))
-#define __sanitizer_syscall_pre_mq_close(mqdes)                                \
-  __sanitizer_syscall_pre_impl_mq_close((long long)(mqdes))
-#define __sanitizer_syscall_post_mq_close(res, mqdes)                          \
-  __sanitizer_syscall_post_impl_mq_close(res, (long long)(mqdes))
-#define __sanitizer_syscall_pre_mq_unlink(name)                                \
-  __sanitizer_syscall_pre_impl_mq_unlink((long long)(name))
-#define __sanitizer_syscall_post_mq_unlink(res, name)                          \
-  __sanitizer_syscall_post_impl_mq_unlink(res, (long long)(name))
-#define __sanitizer_syscall_pre_mq_getattr(mqdes, mqstat)                      \
-  __sanitizer_syscall_pre_impl_mq_getattr((long long)(mqdes),                  \
-                                          (long long)(mqstat))
-#define __sanitizer_syscall_post_mq_getattr(res, mqdes, mqstat)                \
-  __sanitizer_syscall_post_impl_mq_getattr(res, (long long)(mqdes),            \
-                                           (long long)(mqstat))
-#define __sanitizer_syscall_pre_mq_setattr(mqdes, mqstat, omqstat)             \
-  __sanitizer_syscall_pre_impl_mq_setattr(                                     \
-      (long long)(mqdes), (long long)(mqstat), (long long)(omqstat))
-#define __sanitizer_syscall_post_mq_setattr(res, mqdes, mqstat, omqstat)       \
-  __sanitizer_syscall_post_impl_mq_setattr(                                    \
-      res, (long long)(mqdes), (long long)(mqstat), (long long)(omqstat))
-#define __sanitizer_syscall_pre_mq_notify(mqdes, notification)                 \
-  __sanitizer_syscall_pre_impl_mq_notify((long long)(mqdes),                   \
-                                         (long long)(notification))
-#define __sanitizer_syscall_post_mq_notify(res, mqdes, notification)           \
-  __sanitizer_syscall_post_impl_mq_notify(res, (long long)(mqdes),             \
-                                          (long long)(notification))
-#define __sanitizer_syscall_pre_mq_send(mqdes, msg_ptr, msg_len, msg_prio)     \
-  __sanitizer_syscall_pre_impl_mq_send(                                        \
-      (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len),          \
-      (long long)(msg_prio))
-#define __sanitizer_syscall_post_mq_send(res, mqdes, msg_ptr, msg_len,         \
-                                         msg_prio)                             \
-  __sanitizer_syscall_post_impl_mq_send(                                       \
-      res, (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len),     \
-      (long long)(msg_prio))
-#define __sanitizer_syscall_pre_mq_receive(mqdes, msg_ptr, msg_len, msg_prio)  \
-  __sanitizer_syscall_pre_impl_mq_receive(                                     \
-      (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len),          \
-      (long long)(msg_prio))
-#define __sanitizer_syscall_post_mq_receive(res, mqdes, msg_ptr, msg_len,      \
-                                            msg_prio)                          \
-  __sanitizer_syscall_post_impl_mq_receive(                                    \
-      res, (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len),     \
-      (long long)(msg_prio))
-#define __sanitizer_syscall_pre_compat_50_mq_timedsend(                        \
-    mqdes, msg_ptr, msg_len, msg_prio, abs_timeout)                            \
-  __sanitizer_syscall_pre_impl_compat_50_mq_timedsend(                         \
-      (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len),          \
-      (long long)(msg_prio), (long long)(abs_timeout))
-#define __sanitizer_syscall_post_compat_50_mq_timedsend(                       \
-    res, mqdes, msg_ptr, msg_len, msg_prio, abs_timeout)                       \
-  __sanitizer_syscall_post_impl_compat_50_mq_timedsend(                        \
-      res, (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len),     \
-      (long long)(msg_prio), (long long)(abs_timeout))
-#define __sanitizer_syscall_pre_compat_50_mq_timedreceive(                     \
-    mqdes, msg_ptr, msg_len, msg_prio, abs_timeout)                            \
-  __sanitizer_syscall_pre_impl_compat_50_mq_timedreceive(                      \
-      (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len),          \
-      (long long)(msg_prio), (long long)(abs_timeout))
-#define __sanitizer_syscall_post_compat_50_mq_timedreceive(                    \
-    res, mqdes, msg_ptr, msg_len, msg_prio, abs_timeout)                       \
-  __sanitizer_syscall_post_impl_compat_50_mq_timedreceive(                     \
-      res, (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len),     \
-      (long long)(msg_prio), (long long)(abs_timeout))
-/* syscall 267 has been skipped */
-/* syscall 268 has been skipped */
-/* syscall 269 has been skipped */
-#define __sanitizer_syscall_pre___posix_rename(from, to)                       \
-  __sanitizer_syscall_pre_impl___posix_rename((long long)(from),               \
-                                              (long long)(to))
-#define __sanitizer_syscall_post___posix_rename(res, from, to)                 \
-  __sanitizer_syscall_post_impl___posix_rename(res, (long long)(from),         \
-                                               (long long)(to))
-#define __sanitizer_syscall_pre_swapctl(cmd, arg, misc)                        \
-  __sanitizer_syscall_pre_impl_swapctl((long long)(cmd), (long long)(arg),     \
-                                       (long long)(misc))
-#define __sanitizer_syscall_post_swapctl(res, cmd, arg, misc)                  \
-  __sanitizer_syscall_post_impl_swapctl(res, (long long)(cmd),                 \
-                                        (long long)(arg), (long long)(misc))
-#define __sanitizer_syscall_pre_compat_30_getdents(fd, buf, count)             \
-  __sanitizer_syscall_pre_impl_compat_30_getdents(                             \
-      (long long)(fd), (long long)(buf), (long long)(count))
-#define __sanitizer_syscall_post_compat_30_getdents(res, fd, buf, count)       \
-  __sanitizer_syscall_post_impl_compat_30_getdents(                            \
-      res, (long long)(fd), (long long)(buf), (long long)(count))
-#define __sanitizer_syscall_pre_minherit(addr, len, inherit)                   \
-  __sanitizer_syscall_pre_impl_minherit((long long)(addr), (long long)(len),   \
-                                        (long long)(inherit))
-#define __sanitizer_syscall_post_minherit(res, addr, len, inherit)             \
-  __sanitizer_syscall_post_impl_minherit(                                      \
-      res, (long long)(addr), (long long)(len), (long long)(inherit))
-#define __sanitizer_syscall_pre_lchmod(path, mode)                             \
-  __sanitizer_syscall_pre_impl_lchmod((long long)(path), (long long)(mode))
-#define __sanitizer_syscall_post_lchmod(res, path, mode)                       \
-  __sanitizer_syscall_post_impl_lchmod(res, (long long)(path),                 \
-                                       (long long)(mode))
-#define __sanitizer_syscall_pre_lchown(path, uid, gid)                         \
-  __sanitizer_syscall_pre_impl_lchown((long long)(path), (long long)(uid),     \
-                                      (long long)(gid))
-#define __sanitizer_syscall_post_lchown(res, path, uid, gid)                   \
-  __sanitizer_syscall_post_impl_lchown(res, (long long)(path),                 \
-                                       (long long)(uid), (long long)(gid))
-#define __sanitizer_syscall_pre_compat_50_lutimes(path, tptr)                  \
-  __sanitizer_syscall_pre_impl_compat_50_lutimes((long long)(path),            \
-                                                 (long long)(tptr))
-#define __sanitizer_syscall_post_compat_50_lutimes(res, path, tptr)            \
-  __sanitizer_syscall_post_impl_compat_50_lutimes(res, (long long)(path),      \
-                                                  (long long)(tptr))
-#define __sanitizer_syscall_pre___msync13(addr, len, flags)                    \
-  __sanitizer_syscall_pre_impl___msync13((long long)(addr), (long long)(len),  \
-                                         (long long)(flags))
-#define __sanitizer_syscall_post___msync13(res, addr, len, flags)              \
-  __sanitizer_syscall_post_impl___msync13(                                     \
-      res, (long long)(addr), (long long)(len), (long long)(flags))
-#define __sanitizer_syscall_pre_compat_30___stat13(path, ub)                   \
-  __sanitizer_syscall_pre_impl_compat_30___stat13((long long)(path),           \
-                                                  (long long)(ub))
-#define __sanitizer_syscall_post_compat_30___stat13(res, path, ub)             \
-  __sanitizer_syscall_post_impl_compat_30___stat13(res, (long long)(path),     \
-                                                   (long long)(ub))
-#define __sanitizer_syscall_pre_compat_30___fstat13(fd, sb)                    \
-  __sanitizer_syscall_pre_impl_compat_30___fstat13((long long)(fd),            \
-                                                   (long long)(sb))
-#define __sanitizer_syscall_post_compat_30___fstat13(res, fd, sb)              \
-  __sanitizer_syscall_post_impl_compat_30___fstat13(res, (long long)(fd),      \
-                                                    (long long)(sb))
-#define __sanitizer_syscall_pre_compat_30___lstat13(path, ub)                  \
-  __sanitizer_syscall_pre_impl_compat_30___lstat13((long long)(path),          \
-                                                   (long long)(ub))
-#define __sanitizer_syscall_post_compat_30___lstat13(res, path, ub)            \
-  __sanitizer_syscall_post_impl_compat_30___lstat13(res, (long long)(path),    \
-                                                    (long long)(ub))
-#define __sanitizer_syscall_pre___sigaltstack14(nss, oss)                      \
-  __sanitizer_syscall_pre_impl___sigaltstack14((long long)(nss),               \
-                                               (long long)(oss))
-#define __sanitizer_syscall_post___sigaltstack14(res, nss, oss)                \
-  __sanitizer_syscall_post_impl___sigaltstack14(res, (long long)(nss),         \
-                                                (long long)(oss))
-#define __sanitizer_syscall_pre___vfork14()                                    \
-  __sanitizer_syscall_pre_impl___vfork14()
-#define __sanitizer_syscall_post___vfork14(res)                                \
-  __sanitizer_syscall_post_impl___vfork14(res)
-#define __sanitizer_syscall_pre___posix_chown(path, uid, gid)                  \
-  __sanitizer_syscall_pre_impl___posix_chown(                                  \
-      (long long)(path), (long long)(uid), (long long)(gid))
-#define __sanitizer_syscall_post___posix_chown(res, path, uid, gid)            \
-  __sanitizer_syscall_post_impl___posix_chown(                                 \
-      res, (long long)(path), (long long)(uid), (long long)(gid))
-#define __sanitizer_syscall_pre___posix_fchown(fd, uid, gid)                   \
-  __sanitizer_syscall_pre_impl___posix_fchown(                                 \
-      (long long)(fd), (long long)(uid), (long long)(gid))
-#define __sanitizer_syscall_post___posix_fchown(res, fd, uid, gid)             \
-  __sanitizer_syscall_post_impl___posix_fchown(                                \
-      res, (long long)(fd), (long long)(uid), (long long)(gid))
-#define __sanitizer_syscall_pre___posix_lchown(path, uid, gid)                 \
-  __sanitizer_syscall_pre_impl___posix_lchown(                                 \
-      (long long)(path), (long long)(uid), (long long)(gid))
-#define __sanitizer_syscall_post___posix_lchown(res, path, uid, gid)           \
-  __sanitizer_syscall_post_impl___posix_lchown(                                \
-      res, (long long)(path), (long long)(uid), (long long)(gid))
-#define __sanitizer_syscall_pre_getsid(pid)                                    \
-  __sanitizer_syscall_pre_impl_getsid((long long)(pid))
-#define __sanitizer_syscall_post_getsid(res, pid)                              \
-  __sanitizer_syscall_post_impl_getsid(res, (long long)(pid))
-#define __sanitizer_syscall_pre___clone(flags, stack)                          \
-  __sanitizer_syscall_pre_impl___clone((long long)(flags), (long long)(stack))
-#define __sanitizer_syscall_post___clone(res, flags, stack)                    \
-  __sanitizer_syscall_post_impl___clone(res, (long long)(flags),               \
-                                        (long long)(stack))
-#define __sanitizer_syscall_pre_fktrace(fd, ops, facs, pid)                    \
-  __sanitizer_syscall_pre_impl_fktrace((long long)(fd), (long long)(ops),      \
-                                       (long long)(facs), (long long)(pid))
-#define __sanitizer_syscall_post_fktrace(res, fd, ops, facs, pid)              \
-  __sanitizer_syscall_post_impl_fktrace(res, (long long)(fd),                  \
-                                        (long long)(ops), (long long)(facs),   \
-                                        (long long)(pid))
-#define __sanitizer_syscall_pre_preadv(fd, iovp, iovcnt, PAD, offset)          \
-  __sanitizer_syscall_pre_impl_preadv((long long)(fd), (long long)(iovp),      \
-                                      (long long)(iovcnt), (long long)(PAD),   \
-                                      (long long)(offset))
-#define __sanitizer_syscall_post_preadv(res, fd, iovp, iovcnt, PAD, offset)    \
-  __sanitizer_syscall_post_impl_preadv(res, (long long)(fd),                   \
-                                       (long long)(iovp), (long long)(iovcnt), \
-                                       (long long)(PAD), (long long)(offset))
-#define __sanitizer_syscall_pre_pwritev(fd, iovp, iovcnt, PAD, offset)         \
-  __sanitizer_syscall_pre_impl_pwritev((long long)(fd), (long long)(iovp),     \
-                                       (long long)(iovcnt), (long long)(PAD),  \
-                                       (long long)(offset))
-#define __sanitizer_syscall_post_pwritev(res, fd, iovp, iovcnt, PAD, offset)   \
-  __sanitizer_syscall_post_impl_pwritev(                                       \
-      res, (long long)(fd), (long long)(iovp), (long long)(iovcnt),            \
-      (long long)(PAD), (long long)(offset))
-#define __sanitizer_syscall_pre_compat_16___sigaction14(signum, nsa, osa)      \
-  __sanitizer_syscall_pre_impl_compat_16___sigaction14(                        \
-      (long long)(signum), (long long)(nsa), (long long)(osa))
-#define __sanitizer_syscall_post_compat_16___sigaction14(res, signum, nsa,     \
-                                                         osa)                  \
-  __sanitizer_syscall_post_impl_compat_16___sigaction14(                       \
-      res, (long long)(signum), (long long)(nsa), (long long)(osa))
-#define __sanitizer_syscall_pre___sigpending14(set)                            \
-  __sanitizer_syscall_pre_impl___sigpending14((long long)(set))
-#define __sanitizer_syscall_post___sigpending14(res, set)                      \
-  __sanitizer_syscall_post_impl___sigpending14(res, (long long)(set))
-#define __sanitizer_syscall_pre___sigprocmask14(how, set, oset)                \
-  __sanitizer_syscall_pre_impl___sigprocmask14(                                \
-      (long long)(how), (long long)(set), (long long)(oset))
-#define __sanitizer_syscall_post___sigprocmask14(res, how, set, oset)          \
-  __sanitizer_syscall_post_impl___sigprocmask14(                               \
-      res, (long long)(how), (long long)(set), (long long)(oset))
-#define __sanitizer_syscall_pre___sigsuspend14(set)                            \
-  __sanitizer_syscall_pre_impl___sigsuspend14((long long)(set))
-#define __sanitizer_syscall_post___sigsuspend14(res, set)                      \
-  __sanitizer_syscall_post_impl___sigsuspend14(res, (long long)(set))
-#define __sanitizer_syscall_pre_compat_16___sigreturn14(sigcntxp)              \
-  __sanitizer_syscall_pre_impl_compat_16___sigreturn14((long long)(sigcntxp))
-#define __sanitizer_syscall_post_compat_16___sigreturn14(res, sigcntxp)        \
-  __sanitizer_syscall_post_impl_compat_16___sigreturn14(res,                   \
-                                                        (long long)(sigcntxp))
-#define __sanitizer_syscall_pre___getcwd(bufp, length)                         \
-  __sanitizer_syscall_pre_impl___getcwd((long long)(bufp), (long long)(length))
-#define __sanitizer_syscall_post___getcwd(res, bufp, length)                   \
-  __sanitizer_syscall_post_impl___getcwd(res, (long long)(bufp),               \
-                                         (long long)(length))
-#define __sanitizer_syscall_pre_fchroot(fd)                                    \
-  __sanitizer_syscall_pre_impl_fchroot((long long)(fd))
-#define __sanitizer_syscall_post_fchroot(res, fd)                              \
-  __sanitizer_syscall_post_impl_fchroot(res, (long long)(fd))
-#define __sanitizer_syscall_pre_compat_30_fhopen(fhp, flags)                   \
-  __sanitizer_syscall_pre_impl_compat_30_fhopen((long long)(fhp),              \
-                                                (long long)(flags))
-#define __sanitizer_syscall_post_compat_30_fhopen(res, fhp, flags)             \
-  __sanitizer_syscall_post_impl_compat_30_fhopen(res, (long long)(fhp),        \
-                                                 (long long)(flags))
-#define __sanitizer_syscall_pre_compat_30_fhstat(fhp, sb)                      \
-  __sanitizer_syscall_pre_impl_compat_30_fhstat((long long)(fhp),              \
-                                                (long long)(sb))
-#define __sanitizer_syscall_post_compat_30_fhstat(res, fhp, sb)                \
-  __sanitizer_syscall_post_impl_compat_30_fhstat(res, (long long)(fhp),        \
-                                                 (long long)(sb))
-#define __sanitizer_syscall_pre_compat_20_fhstatfs(fhp, buf)                   \
-  __sanitizer_syscall_pre_impl_compat_20_fhstatfs((long long)(fhp),            \
-                                                  (long long)(buf))
-#define __sanitizer_syscall_post_compat_20_fhstatfs(res, fhp, buf)             \
-  __sanitizer_syscall_post_impl_compat_20_fhstatfs(res, (long long)(fhp),      \
-                                                   (long long)(buf))
-#define __sanitizer_syscall_pre_compat_50_____semctl13(semid, semnum, cmd,     \
-                                                       arg)                    \
-  __sanitizer_syscall_pre_impl_compat_50_____semctl13(                         \
-      (long long)(semid), (long long)(semnum), (long long)(cmd),               \
-      (long long)(arg))
-#define __sanitizer_syscall_post_compat_50_____semctl13(res, semid, semnum,    \
-                                                        cmd, arg)              \
-  __sanitizer_syscall_post_impl_compat_50_____semctl13(                        \
-      res, (long long)(semid), (long long)(semnum), (long long)(cmd),          \
-      (long long)(arg))
-#define __sanitizer_syscall_pre_compat_50___msgctl13(msqid, cmd, buf)          \
-  __sanitizer_syscall_pre_impl_compat_50___msgctl13(                           \
-      (long long)(msqid), (long long)(cmd), (long long)(buf))
-#define __sanitizer_syscall_post_compat_50___msgctl13(res, msqid, cmd, buf)    \
-  __sanitizer_syscall_post_impl_compat_50___msgctl13(                          \
-      res, (long long)(msqid), (long long)(cmd), (long long)(buf))
-#define __sanitizer_syscall_pre_compat_50___shmctl13(shmid, cmd, buf)          \
-  __sanitizer_syscall_pre_impl_compat_50___shmctl13(                           \
-      (long long)(shmid), (long long)(cmd), (long long)(buf))
-#define __sanitizer_syscall_post_compat_50___shmctl13(res, shmid, cmd, buf)    \
-  __sanitizer_syscall_post_impl_compat_50___shmctl13(                          \
-      res, (long long)(shmid), (long long)(cmd), (long long)(buf))
-#define __sanitizer_syscall_pre_lchflags(path, flags)                          \
-  __sanitizer_syscall_pre_impl_lchflags((long long)(path), (long long)(flags))
-#define __sanitizer_syscall_post_lchflags(res, path, flags)                    \
-  __sanitizer_syscall_post_impl_lchflags(res, (long long)(path),               \
-                                         (long long)(flags))
-#define __sanitizer_syscall_pre_issetugid()                                    \
-  __sanitizer_syscall_pre_impl_issetugid()
-#define __sanitizer_syscall_post_issetugid(res)                                \
-  __sanitizer_syscall_post_impl_issetugid(res)
-#define __sanitizer_syscall_pre_utrace(label, addr, len)                       \
-  __sanitizer_syscall_pre_impl_utrace((long long)(label), (long long)(addr),   \
-                                      (long long)(len))
-#define __sanitizer_syscall_post_utrace(res, label, addr, len)                 \
-  __sanitizer_syscall_post_impl_utrace(res, (long long)(label),                \
-                                       (long long)(addr), (long long)(len))
-#define __sanitizer_syscall_pre_getcontext(ucp)                                \
-  __sanitizer_syscall_pre_impl_getcontext((long long)(ucp))
-#define __sanitizer_syscall_post_getcontext(res, ucp)                          \
-  __sanitizer_syscall_post_impl_getcontext(res, (long long)(ucp))
-#define __sanitizer_syscall_pre_setcontext(ucp)                                \
-  __sanitizer_syscall_pre_impl_setcontext((long long)(ucp))
-#define __sanitizer_syscall_post_setcontext(res, ucp)                          \
-  __sanitizer_syscall_post_impl_setcontext(res, (long long)(ucp))
-#define __sanitizer_syscall_pre__lwp_create(ucp, flags, new_lwp)               \
-  __sanitizer_syscall_pre_impl__lwp_create(                                    \
-      (long long)(ucp), (long long)(flags), (long long)(new_lwp))
-#define __sanitizer_syscall_post__lwp_create(res, ucp, flags, new_lwp)         \
-  __sanitizer_syscall_post_impl__lwp_create(                                   \
-      res, (long long)(ucp), (long long)(flags), (long long)(new_lwp))
-#define __sanitizer_syscall_pre__lwp_exit()                                    \
-  __sanitizer_syscall_pre_impl__lwp_exit()
-#define __sanitizer_syscall_post__lwp_exit(res)                                \
-  __sanitizer_syscall_post_impl__lwp_exit(res)
-#define __sanitizer_syscall_pre__lwp_self()                                    \
-  __sanitizer_syscall_pre_impl__lwp_self()
-#define __sanitizer_syscall_post__lwp_self(res)                                \
-  __sanitizer_syscall_post_impl__lwp_self(res)
-#define __sanitizer_syscall_pre__lwp_wait(wait_for, departed)                  \
-  __sanitizer_syscall_pre_impl__lwp_wait((long long)(wait_for),                \
-                                         (long long)(departed))
-#define __sanitizer_syscall_post__lwp_wait(res, wait_for, departed)            \
-  __sanitizer_syscall_post_impl__lwp_wait(res, (long long)(wait_for),          \
-                                          (long long)(departed))
-#define __sanitizer_syscall_pre__lwp_suspend(target)                           \
-  __sanitizer_syscall_pre_impl__lwp_suspend((long long)(target))
-#define __sanitizer_syscall_post__lwp_suspend(res, target)                     \
-  __sanitizer_syscall_post_impl__lwp_suspend(res, (long long)(target))
-#define __sanitizer_syscall_pre__lwp_continue(target)                          \
-  __sanitizer_syscall_pre_impl__lwp_continue((long long)(target))
-#define __sanitizer_syscall_post__lwp_continue(res, target)                    \
-  __sanitizer_syscall_post_impl__lwp_continue(res, (long long)(target))
-#define __sanitizer_syscall_pre__lwp_wakeup(target)                            \
-  __sanitizer_syscall_pre_impl__lwp_wakeup((long long)(target))
-#define __sanitizer_syscall_post__lwp_wakeup(res, target)                      \
-  __sanitizer_syscall_post_impl__lwp_wakeup(res, (long long)(target))
-#define __sanitizer_syscall_pre__lwp_getprivate()                              \
-  __sanitizer_syscall_pre_impl__lwp_getprivate()
-#define __sanitizer_syscall_post__lwp_getprivate(res)                          \
-  __sanitizer_syscall_post_impl__lwp_getprivate(res)
-#define __sanitizer_syscall_pre__lwp_setprivate(ptr)                           \
-  __sanitizer_syscall_pre_impl__lwp_setprivate((long long)(ptr))
-#define __sanitizer_syscall_post__lwp_setprivate(res, ptr)                     \
-  __sanitizer_syscall_post_impl__lwp_setprivate(res, (long long)(ptr))
-#define __sanitizer_syscall_pre__lwp_kill(target, signo)                       \
-  __sanitizer_syscall_pre_impl__lwp_kill((long long)(target),                  \
-                                         (long long)(signo))
-#define __sanitizer_syscall_post__lwp_kill(res, target, signo)                 \
-  __sanitizer_syscall_post_impl__lwp_kill(res, (long long)(target),            \
-                                          (long long)(signo))
-#define __sanitizer_syscall_pre__lwp_detach(target)                            \
-  __sanitizer_syscall_pre_impl__lwp_detach((long long)(target))
-#define __sanitizer_syscall_post__lwp_detach(res, target)                      \
-  __sanitizer_syscall_post_impl__lwp_detach(res, (long long)(target))
-#define __sanitizer_syscall_pre_compat_50__lwp_park(ts, unpark, hint,          \
-                                                    unparkhint)                \
-  __sanitizer_syscall_pre_impl_compat_50__lwp_park(                            \
-      (long long)(ts), (long long)(unpark), (long long)(hint),                 \
-      (long long)(unparkhint))
-#define __sanitizer_syscall_post_compat_50__lwp_park(res, ts, unpark, hint,    \
-                                                     unparkhint)               \
-  __sanitizer_syscall_post_impl_compat_50__lwp_park(                           \
-      res, (long long)(ts), (long long)(unpark), (long long)(hint),            \
-      (long long)(unparkhint))
-#define __sanitizer_syscall_pre__lwp_unpark(target, hint)                      \
-  __sanitizer_syscall_pre_impl__lwp_unpark((long long)(target),                \
-                                           (long long)(hint))
-#define __sanitizer_syscall_post__lwp_unpark(res, target, hint)                \
-  __sanitizer_syscall_post_impl__lwp_unpark(res, (long long)(target),          \
-                                            (long long)(hint))
-#define __sanitizer_syscall_pre__lwp_unpark_all(targets, ntargets, hint)       \
-  __sanitizer_syscall_pre_impl__lwp_unpark_all(                                \
-      (long long)(targets), (long long)(ntargets), (long long)(hint))
-#define __sanitizer_syscall_post__lwp_unpark_all(res, targets, ntargets, hint) \
-  __sanitizer_syscall_post_impl__lwp_unpark_all(                               \
-      res, (long long)(targets), (long long)(ntargets), (long long)(hint))
-#define __sanitizer_syscall_pre__lwp_setname(target, name)                     \
-  __sanitizer_syscall_pre_impl__lwp_setname((long long)(target),               \
-                                            (long long)(name))
-#define __sanitizer_syscall_post__lwp_setname(res, target, name)               \
-  __sanitizer_syscall_post_impl__lwp_setname(res, (long long)(target),         \
-                                             (long long)(name))
-#define __sanitizer_syscall_pre__lwp_getname(target, name, len)                \
-  __sanitizer_syscall_pre_impl__lwp_getname(                                   \
-      (long long)(target), (long long)(name), (long long)(len))
-#define __sanitizer_syscall_post__lwp_getname(res, target, name, len)          \
-  __sanitizer_syscall_post_impl__lwp_getname(                                  \
-      res, (long long)(target), (long long)(name), (long long)(len))
-#define __sanitizer_syscall_pre__lwp_ctl(features, address)                    \
-  __sanitizer_syscall_pre_impl__lwp_ctl((long long)(features),                 \
-                                        (long long)(address))
-#define __sanitizer_syscall_post__lwp_ctl(res, features, address)              \
-  __sanitizer_syscall_post_impl__lwp_ctl(res, (long long)(features),           \
-                                         (long long)(address))
-/* syscall 326 has been skipped */
-/* syscall 327 has been skipped */
-/* syscall 328 has been skipped */
-/* syscall 329 has been skipped */
-#define __sanitizer_syscall_pre_compat_60_sa_register(newv, oldv, flags,       \
-                                                      stackinfo_offset)        \
-  __sanitizer_syscall_pre_impl_compat_60_sa_register(                          \
-      (long long)(newv), (long long)(oldv), (long long)(flags),                \
-      (long long)(stackinfo_offset))
-#define __sanitizer_syscall_post_compat_60_sa_register(res, newv, oldv, flags, \
-                                                       stackinfo_offset)       \
-  __sanitizer_syscall_post_impl_compat_60_sa_register(                         \
-      res, (long long)(newv), (long long)(oldv), (long long)(flags),           \
-      (long long)(stackinfo_offset))
-#define __sanitizer_syscall_pre_compat_60_sa_stacks(num, stacks)               \
-  __sanitizer_syscall_pre_impl_compat_60_sa_stacks((long long)(num),           \
-                                                   (long long)(stacks))
-#define __sanitizer_syscall_post_compat_60_sa_stacks(res, num, stacks)         \
-  __sanitizer_syscall_post_impl_compat_60_sa_stacks(res, (long long)(num),     \
-                                                    (long long)(stacks))
-#define __sanitizer_syscall_pre_compat_60_sa_enable()                          \
-  __sanitizer_syscall_pre_impl_compat_60_sa_enable()
-#define __sanitizer_syscall_post_compat_60_sa_enable(res)                      \
-  __sanitizer_syscall_post_impl_compat_60_sa_enable(res)
-#define __sanitizer_syscall_pre_compat_60_sa_setconcurrency(concurrency)       \
-  __sanitizer_syscall_pre_impl_compat_60_sa_setconcurrency(                    \
-      (long long)(concurrency))
-#define __sanitizer_syscall_post_compat_60_sa_setconcurrency(res, concurrency) \
-  __sanitizer_syscall_post_impl_compat_60_sa_setconcurrency(                   \
-      res, (long long)(concurrency))
-#define __sanitizer_syscall_pre_compat_60_sa_yield()                           \
-  __sanitizer_syscall_pre_impl_compat_60_sa_yield()
-#define __sanitizer_syscall_post_compat_60_sa_yield(res)                       \
-  __sanitizer_syscall_post_impl_compat_60_sa_yield(res)
-#define __sanitizer_syscall_pre_compat_60_sa_preempt(sa_id)                    \
-  __sanitizer_syscall_pre_impl_compat_60_sa_preempt((long long)(sa_id))
-#define __sanitizer_syscall_post_compat_60_sa_preempt(res, sa_id)              \
-  __sanitizer_syscall_post_impl_compat_60_sa_preempt(res, (long long)(sa_id))
-/* syscall 336 has been skipped */
-/* syscall 337 has been skipped */
-/* syscall 338 has been skipped */
-/* syscall 339 has been skipped */
-#define __sanitizer_syscall_pre___sigaction_sigtramp(signum, nsa, osa, tramp,  \
-                                                     vers)                     \
-  __sanitizer_syscall_pre_impl___sigaction_sigtramp(                           \
-      (long long)(signum), (long long)(nsa), (long long)(osa),                 \
-      (long long)(tramp), (long long)(vers))
-#define __sanitizer_syscall_post___sigaction_sigtramp(res, signum, nsa, osa,   \
-                                                      tramp, vers)             \
-  __sanitizer_syscall_post_impl___sigaction_sigtramp(                          \
-      res, (long long)(signum), (long long)(nsa), (long long)(osa),            \
-      (long long)(tramp), (long long)(vers))
-/* syscall 341 has been skipped */
-/* syscall 342 has been skipped */
-#define __sanitizer_syscall_pre_rasctl(addr, len, op)                          \
-  __sanitizer_syscall_pre_impl_rasctl((long long)(addr), (long long)(len),     \
-                                      (long long)(op))
-#define __sanitizer_syscall_post_rasctl(res, addr, len, op)                    \
-  __sanitizer_syscall_post_impl_rasctl(res, (long long)(addr),                 \
-                                       (long long)(len), (long long)(op))
-#define __sanitizer_syscall_pre_kqueue() __sanitizer_syscall_pre_impl_kqueue()
-#define __sanitizer_syscall_post_kqueue(res)                                   \
-  __sanitizer_syscall_post_impl_kqueue(res)
-#define __sanitizer_syscall_pre_compat_50_kevent(fd, changelist, nchanges,     \
-                                                 eventlist, nevents, timeout)  \
-  __sanitizer_syscall_pre_impl_compat_50_kevent(                               \
-      (long long)(fd), (long long)(changelist), (long long)(nchanges),         \
-      (long long)(eventlist), (long long)(nevents), (long long)(timeout))
-#define __sanitizer_syscall_post_compat_50_kevent(                             \
-    res, fd, changelist, nchanges, eventlist, nevents, timeout)                \
-  __sanitizer_syscall_post_impl_compat_50_kevent(                              \
-      res, (long long)(fd), (long long)(changelist), (long long)(nchanges),    \
-      (long long)(eventlist), (long long)(nevents), (long long)(timeout))
-#define __sanitizer_syscall_pre__sched_setparam(pid, lid, policy, params)      \
-  __sanitizer_syscall_pre_impl__sched_setparam(                                \
-      (long long)(pid), (long long)(lid), (long long)(policy),                 \
-      (long long)(params))
-#define __sanitizer_syscall_post__sched_setparam(res, pid, lid, policy,        \
-                                                 params)                       \
-  __sanitizer_syscall_post_impl__sched_setparam(                               \
-      res, (long long)(pid), (long long)(lid), (long long)(policy),            \
-      (long long)(params))
-#define __sanitizer_syscall_pre__sched_getparam(pid, lid, policy, params)      \
-  __sanitizer_syscall_pre_impl__sched_getparam(                                \
-      (long long)(pid), (long long)(lid), (long long)(policy),                 \
-      (long long)(params))
-#define __sanitizer_syscall_post__sched_getparam(res, pid, lid, policy,        \
-                                                 params)                       \
-  __sanitizer_syscall_post_impl__sched_getparam(                               \
-      res, (long long)(pid), (long long)(lid), (long long)(policy),            \
-      (long long)(params))
-#define __sanitizer_syscall_pre__sched_setaffinity(pid, lid, size, cpuset)     \
-  __sanitizer_syscall_pre_impl__sched_setaffinity(                             \
-      (long long)(pid), (long long)(lid), (long long)(size),                   \
-      (long long)(cpuset))
-#define __sanitizer_syscall_post__sched_setaffinity(res, pid, lid, size,       \
-                                                    cpuset)                    \
-  __sanitizer_syscall_post_impl__sched_setaffinity(                            \
-      res, (long long)(pid), (long long)(lid), (long long)(size),              \
-      (long long)(cpuset))
-#define __sanitizer_syscall_pre__sched_getaffinity(pid, lid, size, cpuset)     \
-  __sanitizer_syscall_pre_impl__sched_getaffinity(                             \
-      (long long)(pid), (long long)(lid), (long long)(size),                   \
-      (long long)(cpuset))
-#define __sanitizer_syscall_post__sched_getaffinity(res, pid, lid, size,       \
-                                                    cpuset)                    \
-  __sanitizer_syscall_post_impl__sched_getaffinity(                            \
-      res, (long long)(pid), (long long)(lid), (long long)(size),              \
-      (long long)(cpuset))
-#define __sanitizer_syscall_pre_sched_yield()                                  \
-  __sanitizer_syscall_pre_impl_sched_yield()
-#define __sanitizer_syscall_post_sched_yield(res)                              \
-  __sanitizer_syscall_post_impl_sched_yield(res)
-#define __sanitizer_syscall_pre__sched_protect(priority)                       \
-  __sanitizer_syscall_pre_impl__sched_protect((long long)(priority))
-#define __sanitizer_syscall_post__sched_protect(res, priority)                 \
-  __sanitizer_syscall_post_impl__sched_protect(res, (long long)(priority))
-/* syscall 352 has been skipped */
-/* syscall 353 has been skipped */
-#define __sanitizer_syscall_pre_fsync_range(fd, flags, start, length)          \
-  __sanitizer_syscall_pre_impl_fsync_range(                                    \
-      (long long)(fd), (long long)(flags), (long long)(start),                 \
-      (long long)(length))
-#define __sanitizer_syscall_post_fsync_range(res, fd, flags, start, length)    \
-  __sanitizer_syscall_post_impl_fsync_range(                                   \
-      res, (long long)(fd), (long long)(flags), (long long)(start),            \
-      (long long)(length))
-#define __sanitizer_syscall_pre_uuidgen(store, count)                          \
-  __sanitizer_syscall_pre_impl_uuidgen((long long)(store), (long long)(count))
-#define __sanitizer_syscall_post_uuidgen(res, store, count)                    \
-  __sanitizer_syscall_post_impl_uuidgen(res, (long long)(store),               \
-                                        (long long)(count))
-#define __sanitizer_syscall_pre_getvfsstat(buf, bufsize, flags)                \
-  __sanitizer_syscall_pre_impl_getvfsstat(                                     \
-      (long long)(buf), (long long)(bufsize), (long long)(flags))
-#define __sanitizer_syscall_post_getvfsstat(res, buf, bufsize, flags)          \
-  __sanitizer_syscall_post_impl_getvfsstat(                                    \
-      res, (long long)(buf), (long long)(bufsize), (long long)(flags))
-#define __sanitizer_syscall_pre_statvfs1(path, buf, flags)                     \
-  __sanitizer_syscall_pre_impl_statvfs1((long long)(path), (long long)(buf),   \
-                                        (long long)(flags))
-#define __sanitizer_syscall_post_statvfs1(res, path, buf, flags)               \
-  __sanitizer_syscall_post_impl_statvfs1(res, (long long)(path),               \
-                                         (long long)(buf), (long long)(flags))
-#define __sanitizer_syscall_pre_fstatvfs1(fd, buf, flags)                      \
-  __sanitizer_syscall_pre_impl_fstatvfs1((long long)(fd), (long long)(buf),    \
-                                         (long long)(flags))
-#define __sanitizer_syscall_post_fstatvfs1(res, fd, buf, flags)                \
-  __sanitizer_syscall_post_impl_fstatvfs1(                                     \
-      res, (long long)(fd), (long long)(buf), (long long)(flags))
-#define __sanitizer_syscall_pre_compat_30_fhstatvfs1(fhp, buf, flags)          \
-  __sanitizer_syscall_pre_impl_compat_30_fhstatvfs1(                           \
-      (long long)(fhp), (long long)(buf), (long long)(flags))
-#define __sanitizer_syscall_post_compat_30_fhstatvfs1(res, fhp, buf, flags)    \
-  __sanitizer_syscall_post_impl_compat_30_fhstatvfs1(                          \
-      res, (long long)(fhp), (long long)(buf), (long long)(flags))
-#define __sanitizer_syscall_pre_extattrctl(path, cmd, filename, attrnamespace, \
-                                           attrname)                           \
-  __sanitizer_syscall_pre_impl_extattrctl(                                     \
-      (long long)(path), (long long)(cmd), (long long)(filename),              \
-      (long long)(attrnamespace), (long long)(attrname))
-#define __sanitizer_syscall_post_extattrctl(res, path, cmd, filename,          \
-                                            attrnamespace, attrname)           \
-  __sanitizer_syscall_post_impl_extattrctl(                                    \
-      res, (long long)(path), (long long)(cmd), (long long)(filename),         \
-      (long long)(attrnamespace), (long long)(attrname))
-#define __sanitizer_syscall_pre_extattr_set_file(path, attrnamespace,          \
-                                                 attrname, data, nbytes)       \
-  __sanitizer_syscall_pre_impl_extattr_set_file(                               \
-      (long long)(path), (long long)(attrnamespace), (long long)(attrname),    \
-      (long long)(data), (long long)(nbytes))
-#define __sanitizer_syscall_post_extattr_set_file(res, path, attrnamespace,    \
-                                                  attrname, data, nbytes)      \
-  __sanitizer_syscall_post_impl_extattr_set_file(                              \
-      res, (long long)(path), (long long)(attrnamespace),                      \
-      (long long)(attrname), (long long)(data), (long long)(nbytes))
-#define __sanitizer_syscall_pre_extattr_get_file(path, attrnamespace,          \
-                                                 attrname, data, nbytes)       \
-  __sanitizer_syscall_pre_impl_extattr_get_file(                               \
-      (long long)(path), (long long)(attrnamespace), (long long)(attrname),    \
-      (long long)(data), (long long)(nbytes))
-#define __sanitizer_syscall_post_extattr_get_file(res, path, attrnamespace,    \
-                                                  attrname, data, nbytes)      \
-  __sanitizer_syscall_post_impl_extattr_get_file(                              \
-      res, (long long)(path), (long long)(attrnamespace),                      \
-      (long long)(attrname), (long long)(data), (long long)(nbytes))
-#define __sanitizer_syscall_pre_extattr_delete_file(path, attrnamespace,       \
-                                                    attrname)                  \
-  __sanitizer_syscall_pre_impl_extattr_delete_file(                            \
-      (long long)(path), (long long)(attrnamespace), (long long)(attrname))
-#define __sanitizer_syscall_post_extattr_delete_file(res, path, attrnamespace, \
-                                                     attrname)                 \
-  __sanitizer_syscall_post_impl_extattr_delete_file(                           \
-      res, (long long)(path), (long long)(attrnamespace),                      \
-      (long long)(attrname))
-#define __sanitizer_syscall_pre_extattr_set_fd(fd, attrnamespace, attrname,    \
-                                               data, nbytes)                   \
-  __sanitizer_syscall_pre_impl_extattr_set_fd(                                 \
-      (long long)(fd), (long long)(attrnamespace), (long long)(attrname),      \
-      (long long)(data), (long long)(nbytes))
-#define __sanitizer_syscall_post_extattr_set_fd(res, fd, attrnamespace,        \
-                                                attrname, data, nbytes)        \
-  __sanitizer_syscall_post_impl_extattr_set_fd(                                \
-      res, (long long)(fd), (long long)(attrnamespace), (long long)(attrname), \
-      (long long)(data), (long long)(nbytes))
-#define __sanitizer_syscall_pre_extattr_get_fd(fd, attrnamespace, attrname,    \
-                                               data, nbytes)                   \
-  __sanitizer_syscall_pre_impl_extattr_get_fd(                                 \
-      (long long)(fd), (long long)(attrnamespace), (long long)(attrname),      \
-      (long long)(data), (long long)(nbytes))
-#define __sanitizer_syscall_post_extattr_get_fd(res, fd, attrnamespace,        \
-                                                attrname, data, nbytes)        \
-  __sanitizer_syscall_post_impl_extattr_get_fd(                                \
-      res, (long long)(fd), (long long)(attrnamespace), (long long)(attrname), \
-      (long long)(data), (long long)(nbytes))
-#define __sanitizer_syscall_pre_extattr_delete_fd(fd, attrnamespace, attrname) \
-  __sanitizer_syscall_pre_impl_extattr_delete_fd(                              \
-      (long long)(fd), (long long)(attrnamespace), (long long)(attrname))
-#define __sanitizer_syscall_post_extattr_delete_fd(res, fd, attrnamespace,     \
-                                                   attrname)                   \
-  __sanitizer_syscall_post_impl_extattr_delete_fd(                             \
-      res, (long long)(fd), (long long)(attrnamespace), (long long)(attrname))
-#define __sanitizer_syscall_pre_extattr_set_link(path, attrnamespace,          \
-                                                 attrname, data, nbytes)       \
-  __sanitizer_syscall_pre_impl_extattr_set_link(                               \
-      (long long)(path), (long long)(attrnamespace), (long long)(attrname),    \
-      (long long)(data), (long long)(nbytes))
-#define __sanitizer_syscall_post_extattr_set_link(res, path, attrnamespace,    \
-                                                  attrname, data, nbytes)      \
-  __sanitizer_syscall_post_impl_extattr_set_link(                              \
-      res, (long long)(path), (long long)(attrnamespace),                      \
-      (long long)(attrname), (long long)(data), (long long)(nbytes))
-#define __sanitizer_syscall_pre_extattr_get_link(path, attrnamespace,          \
-                                                 attrname, data, nbytes)       \
-  __sanitizer_syscall_pre_impl_extattr_get_link(                               \
-      (long long)(path), (long long)(attrnamespace), (long long)(attrname),    \
-      (long long)(data), (long long)(nbytes))
-#define __sanitizer_syscall_post_extattr_get_link(res, path, attrnamespace,    \
-                                                  attrname, data, nbytes)      \
-  __sanitizer_syscall_post_impl_extattr_get_link(                              \
-      res, (long long)(path), (long long)(attrnamespace),                      \
-      (long long)(attrname), (long long)(data), (long long)(nbytes))
-#define __sanitizer_syscall_pre_extattr_delete_link(path, attrnamespace,       \
-                                                    attrname)                  \
-  __sanitizer_syscall_pre_impl_extattr_delete_link(                            \
-      (long long)(path), (long long)(attrnamespace), (long long)(attrname))
-#define __sanitizer_syscall_post_extattr_delete_link(res, path, attrnamespace, \
-                                                     attrname)                 \
-  __sanitizer_syscall_post_impl_extattr_delete_link(                           \
-      res, (long long)(path), (long long)(attrnamespace),                      \
-      (long long)(attrname))
-#define __sanitizer_syscall_pre_extattr_list_fd(fd, attrnamespace, data,       \
-                                                nbytes)                        \
-  __sanitizer_syscall_pre_impl_extattr_list_fd(                                \
-      (long long)(fd), (long long)(attrnamespace), (long long)(data),          \
-      (long long)(nbytes))
-#define __sanitizer_syscall_post_extattr_list_fd(res, fd, attrnamespace, data, \
-                                                 nbytes)                       \
-  __sanitizer_syscall_post_impl_extattr_list_fd(                               \
-      res, (long long)(fd), (long long)(attrnamespace), (long long)(data),     \
-      (long long)(nbytes))
-#define __sanitizer_syscall_pre_extattr_list_file(path, attrnamespace, data,   \
-                                                  nbytes)                      \
-  __sanitizer_syscall_pre_impl_extattr_list_file(                              \
-      (long long)(path), (long long)(attrnamespace), (long long)(data),        \
-      (long long)(nbytes))
-#define __sanitizer_syscall_post_extattr_list_file(res, path, attrnamespace,   \
-                                                   data, nbytes)               \
-  __sanitizer_syscall_post_impl_extattr_list_file(                             \
-      res, (long long)(path), (long long)(attrnamespace), (long long)(data),   \
-      (long long)(nbytes))
-#define __sanitizer_syscall_pre_extattr_list_link(path, attrnamespace, data,   \
-                                                  nbytes)                      \
-  __sanitizer_syscall_pre_impl_extattr_list_link(                              \
-      (long long)(path), (long long)(attrnamespace), (long long)(data),        \
-      (long long)(nbytes))
-#define __sanitizer_syscall_post_extattr_list_link(res, path, attrnamespace,   \
-                                                   data, nbytes)               \
-  __sanitizer_syscall_post_impl_extattr_list_link(                             \
-      res, (long long)(path), (long long)(attrnamespace), (long long)(data),   \
-      (long long)(nbytes))
-#define __sanitizer_syscall_pre_compat_50_pselect(nd, in, ou, ex, ts, mask)    \
-  __sanitizer_syscall_pre_impl_compat_50_pselect(                              \
-      (long long)(nd), (long long)(in), (long long)(ou), (long long)(ex),      \
-      (long long)(ts), (long long)(mask))
-#define __sanitizer_syscall_post_compat_50_pselect(res, nd, in, ou, ex, ts,    \
-                                                   mask)                       \
-  __sanitizer_syscall_post_impl_compat_50_pselect(                             \
-      res, (long long)(nd), (long long)(in), (long long)(ou), (long long)(ex), \
-      (long long)(ts), (long long)(mask))
-#define __sanitizer_syscall_pre_compat_50_pollts(fds, nfds, ts, mask)          \
-  __sanitizer_syscall_pre_impl_compat_50_pollts(                               \
-      (long long)(fds), (long long)(nfds), (long long)(ts), (long long)(mask))
-#define __sanitizer_syscall_post_compat_50_pollts(res, fds, nfds, ts, mask)    \
-  __sanitizer_syscall_post_impl_compat_50_pollts(                              \
-      res, (long long)(fds), (long long)(nfds), (long long)(ts),               \
-      (long long)(mask))
-#define __sanitizer_syscall_pre_setxattr(path, name, value, size, flags)       \
-  __sanitizer_syscall_pre_impl_setxattr((long long)(path), (long long)(name),  \
-                                        (long long)(value), (long long)(size), \
-                                        (long long)(flags))
-#define __sanitizer_syscall_post_setxattr(res, path, name, value, size, flags) \
-  __sanitizer_syscall_post_impl_setxattr(                                      \
-      res, (long long)(path), (long long)(name), (long long)(value),           \
-      (long long)(size), (long long)(flags))
-#define __sanitizer_syscall_pre_lsetxattr(path, name, value, size, flags)      \
-  __sanitizer_syscall_pre_impl_lsetxattr(                                      \
-      (long long)(path), (long long)(name), (long long)(value),                \
-      (long long)(size), (long long)(flags))
-#define __sanitizer_syscall_post_lsetxattr(res, path, name, value, size,       \
-                                           flags)                              \
-  __sanitizer_syscall_post_impl_lsetxattr(                                     \
-      res, (long long)(path), (long long)(name), (long long)(value),           \
-      (long long)(size), (long long)(flags))
-#define __sanitizer_syscall_pre_fsetxattr(fd, name, value, size, flags)        \
-  __sanitizer_syscall_pre_impl_fsetxattr(                                      \
-      (long long)(fd), (long long)(name), (long long)(value),                  \
-      (long long)(size), (long long)(flags))
-#define __sanitizer_syscall_post_fsetxattr(res, fd, name, value, size, flags)  \
-  __sanitizer_syscall_post_impl_fsetxattr(                                     \
-      res, (long long)(fd), (long long)(name), (long long)(value),             \
-      (long long)(size), (long long)(flags))
-#define __sanitizer_syscall_pre_getxattr(path, name, value, size)              \
-  __sanitizer_syscall_pre_impl_getxattr((long long)(path), (long long)(name),  \
-                                        (long long)(value), (long long)(size))
-#define __sanitizer_syscall_post_getxattr(res, path, name, value, size)        \
-  __sanitizer_syscall_post_impl_getxattr(                                      \
-      res, (long long)(path), (long long)(name), (long long)(value),           \
-      (long long)(size))
-#define __sanitizer_syscall_pre_lgetxattr(path, name, value, size)             \
-  __sanitizer_syscall_pre_impl_lgetxattr((long long)(path), (long long)(name), \
-                                         (long long)(value),                   \
-                                         (long long)(size))
-#define __sanitizer_syscall_post_lgetxattr(res, path, name, value, size)       \
-  __sanitizer_syscall_post_impl_lgetxattr(                                     \
-      res, (long long)(path), (long long)(name), (long long)(value),           \
-      (long long)(size))
-#define __sanitizer_syscall_pre_fgetxattr(fd, name, value, size)               \
-  __sanitizer_syscall_pre_impl_fgetxattr((long long)(fd), (long long)(name),   \
-                                         (long long)(value),                   \
-                                         (long long)(size))
-#define __sanitizer_syscall_post_fgetxattr(res, fd, name, value, size)         \
-  __sanitizer_syscall_post_impl_fgetxattr(                                     \
-      res, (long long)(fd), (long long)(name), (long long)(value),             \
-      (long long)(size))
-#define __sanitizer_syscall_pre_listxattr(path, list, size)                    \
-  __sanitizer_syscall_pre_impl_listxattr((long long)(path), (long long)(list), \
-                                         (long long)(size))
-#define __sanitizer_syscall_post_listxattr(res, path, list, size)              \
-  __sanitizer_syscall_post_impl_listxattr(                                     \
-      res, (long long)(path), (long long)(list), (long long)(size))
-#define __sanitizer_syscall_pre_llistxattr(path, list, size)                   \
-  __sanitizer_syscall_pre_impl_llistxattr(                                     \
-      (long long)(path), (long long)(list), (long long)(size))
-#define __sanitizer_syscall_post_llistxattr(res, path, list, size)             \
-  __sanitizer_syscall_post_impl_llistxattr(                                    \
-      res, (long long)(path), (long long)(list), (long long)(size))
-#define __sanitizer_syscall_pre_flistxattr(fd, list, size)                     \
-  __sanitizer_syscall_pre_impl_flistxattr((long long)(fd), (long long)(list),  \
-                                          (long long)(size))
-#define __sanitizer_syscall_post_flistxattr(res, fd, list, size)               \
-  __sanitizer_syscall_post_impl_flistxattr(                                    \
-      res, (long long)(fd), (long long)(list), (long long)(size))
-#define __sanitizer_syscall_pre_removexattr(path, name)                        \
-  __sanitizer_syscall_pre_impl_removexattr((long long)(path), (long long)(name))
-#define __sanitizer_syscall_post_removexattr(res, path, name)                  \
-  __sanitizer_syscall_post_impl_removexattr(res, (long long)(path),            \
-                                            (long long)(name))
-#define __sanitizer_syscall_pre_lremovexattr(path, name)                       \
-  __sanitizer_syscall_pre_impl_lremovexattr((long long)(path),                 \
-                                            (long long)(name))
-#define __sanitizer_syscall_post_lremovexattr(res, path, name)                 \
-  __sanitizer_syscall_post_impl_lremovexattr(res, (long long)(path),           \
-                                             (long long)(name))
-#define __sanitizer_syscall_pre_fremovexattr(fd, name)                         \
-  __sanitizer_syscall_pre_impl_fremovexattr((long long)(fd), (long long)(name))
-#define __sanitizer_syscall_post_fremovexattr(res, fd, name)                   \
-  __sanitizer_syscall_post_impl_fremovexattr(res, (long long)(fd),             \
-                                             (long long)(name))
-#define __sanitizer_syscall_pre_compat_50___stat30(path, ub)                   \
-  __sanitizer_syscall_pre_impl_compat_50___stat30((long long)(path),           \
-                                                  (long long)(ub))
-#define __sanitizer_syscall_post_compat_50___stat30(res, path, ub)             \
-  __sanitizer_syscall_post_impl_compat_50___stat30(res, (long long)(path),     \
-                                                   (long long)(ub))
-#define __sanitizer_syscall_pre_compat_50___fstat30(fd, sb)                    \
-  __sanitizer_syscall_pre_impl_compat_50___fstat30((long long)(fd),            \
-                                                   (long long)(sb))
-#define __sanitizer_syscall_post_compat_50___fstat30(res, fd, sb)              \
-  __sanitizer_syscall_post_impl_compat_50___fstat30(res, (long long)(fd),      \
-                                                    (long long)(sb))
-#define __sanitizer_syscall_pre_compat_50___lstat30(path, ub)                  \
-  __sanitizer_syscall_pre_impl_compat_50___lstat30((long long)(path),          \
-                                                   (long long)(ub))
-#define __sanitizer_syscall_post_compat_50___lstat30(res, path, ub)            \
-  __sanitizer_syscall_post_impl_compat_50___lstat30(res, (long long)(path),    \
-                                                    (long long)(ub))
-#define __sanitizer_syscall_pre___getdents30(fd, buf, count)                   \
-  __sanitizer_syscall_pre_impl___getdents30((long long)(fd), (long long)(buf), \
-                                            (long long)(count))
-#define __sanitizer_syscall_post___getdents30(res, fd, buf, count)             \
-  __sanitizer_syscall_post_impl___getdents30(                                  \
-      res, (long long)(fd), (long long)(buf), (long long)(count))
-#define __sanitizer_syscall_pre_posix_fadvise()                                \
-  __sanitizer_syscall_pre_impl_posix_fadvise((long long)())
-#define __sanitizer_syscall_post_posix_fadvise(res)                            \
-  __sanitizer_syscall_post_impl_posix_fadvise(res, (long long)())
-#define __sanitizer_syscall_pre_compat_30___fhstat30(fhp, sb)                  \
-  __sanitizer_syscall_pre_impl_compat_30___fhstat30((long long)(fhp),          \
-                                                    (long long)(sb))
-#define __sanitizer_syscall_post_compat_30___fhstat30(res, fhp, sb)            \
-  __sanitizer_syscall_post_impl_compat_30___fhstat30(res, (long long)(fhp),    \
-                                                     (long long)(sb))
-#define __sanitizer_syscall_pre_compat_50___ntp_gettime30(ntvp)                \
-  __sanitizer_syscall_pre_impl_compat_50___ntp_gettime30((long long)(ntvp))
-#define __sanitizer_syscall_post_compat_50___ntp_gettime30(res, ntvp)          \
-  __sanitizer_syscall_post_impl_compat_50___ntp_gettime30(res,                 \
-                                                          (long long)(ntvp))
-#define __sanitizer_syscall_pre___socket30(domain, type, protocol)             \
-  __sanitizer_syscall_pre_impl___socket30(                                     \
-      (long long)(domain), (long long)(type), (long long)(protocol))
-#define __sanitizer_syscall_post___socket30(res, domain, type, protocol)       \
-  __sanitizer_syscall_post_impl___socket30(                                    \
-      res, (long long)(domain), (long long)(type), (long long)(protocol))
-#define __sanitizer_syscall_pre___getfh30(fname, fhp, fh_size)                 \
-  __sanitizer_syscall_pre_impl___getfh30((long long)(fname), (long long)(fhp), \
-                                         (long long)(fh_size))
-#define __sanitizer_syscall_post___getfh30(res, fname, fhp, fh_size)           \
-  __sanitizer_syscall_post_impl___getfh30(                                     \
-      res, (long long)(fname), (long long)(fhp), (long long)(fh_size))
-#define __sanitizer_syscall_pre___fhopen40(fhp, fh_size, flags)                \
-  __sanitizer_syscall_pre_impl___fhopen40(                                     \
-      (long long)(fhp), (long long)(fh_size), (long long)(flags))
-#define __sanitizer_syscall_post___fhopen40(res, fhp, fh_size, flags)          \
-  __sanitizer_syscall_post_impl___fhopen40(                                    \
-      res, (long long)(fhp), (long long)(fh_size), (long long)(flags))
-#define __sanitizer_syscall_pre___fhstatvfs140(fhp, fh_size, buf, flags)       \
-  __sanitizer_syscall_pre_impl___fhstatvfs140(                                 \
-      (long long)(fhp), (long long)(fh_size), (long long)(buf),                \
-      (long long)(flags))
-#define __sanitizer_syscall_post___fhstatvfs140(res, fhp, fh_size, buf, flags) \
-  __sanitizer_syscall_post_impl___fhstatvfs140(                                \
-      res, (long long)(fhp), (long long)(fh_size), (long long)(buf),           \
-      (long long)(flags))
-#define __sanitizer_syscall_pre_compat_50___fhstat40(fhp, fh_size, sb)         \
-  __sanitizer_syscall_pre_impl_compat_50___fhstat40(                           \
-      (long long)(fhp), (long long)(fh_size), (long long)(sb))
-#define __sanitizer_syscall_post_compat_50___fhstat40(res, fhp, fh_size, sb)   \
-  __sanitizer_syscall_post_impl_compat_50___fhstat40(                          \
-      res, (long long)(fhp), (long long)(fh_size), (long long)(sb))
-#define __sanitizer_syscall_pre_aio_cancel(fildes, aiocbp)                     \
-  __sanitizer_syscall_pre_impl_aio_cancel((long long)(fildes),                 \
-                                          (long long)(aiocbp))
-#define __sanitizer_syscall_post_aio_cancel(res, fildes, aiocbp)               \
-  __sanitizer_syscall_post_impl_aio_cancel(res, (long long)(fildes),           \
-                                           (long long)(aiocbp))
-#define __sanitizer_syscall_pre_aio_error(aiocbp)                              \
-  __sanitizer_syscall_pre_impl_aio_error((long long)(aiocbp))
-#define __sanitizer_syscall_post_aio_error(res, aiocbp)                        \
-  __sanitizer_syscall_post_impl_aio_error(res, (long long)(aiocbp))
-#define __sanitizer_syscall_pre_aio_fsync(op, aiocbp)                          \
-  __sanitizer_syscall_pre_impl_aio_fsync((long long)(op), (long long)(aiocbp))
-#define __sanitizer_syscall_post_aio_fsync(res, op, aiocbp)                    \
-  __sanitizer_syscall_post_impl_aio_fsync(res, (long long)(op),                \
-                                          (long long)(aiocbp))
-#define __sanitizer_syscall_pre_aio_read(aiocbp)                               \
-  __sanitizer_syscall_pre_impl_aio_read((long long)(aiocbp))
-#define __sanitizer_syscall_post_aio_read(res, aiocbp)                         \
-  __sanitizer_syscall_post_impl_aio_read(res, (long long)(aiocbp))
-#define __sanitizer_syscall_pre_aio_return(aiocbp)                             \
-  __sanitizer_syscall_pre_impl_aio_return((long long)(aiocbp))
-#define __sanitizer_syscall_post_aio_return(res, aiocbp)                       \
-  __sanitizer_syscall_post_impl_aio_return(res, (long long)(aiocbp))
-#define __sanitizer_syscall_pre_compat_50_aio_suspend(list, nent, timeout)     \
-  __sanitizer_syscall_pre_impl_compat_50_aio_suspend(                          \
-      (long long)(list), (long long)(nent), (long long)(timeout))
-#define __sanitizer_syscall_post_compat_50_aio_suspend(res, list, nent,        \
-                                                       timeout)                \
-  __sanitizer_syscall_post_impl_compat_50_aio_suspend(                         \
-      res, (long long)(list), (long long)(nent), (long long)(timeout))
-#define __sanitizer_syscall_pre_aio_write(aiocbp)                              \
-  __sanitizer_syscall_pre_impl_aio_write((long long)(aiocbp))
-#define __sanitizer_syscall_post_aio_write(res, aiocbp)                        \
-  __sanitizer_syscall_post_impl_aio_write(res, (long long)(aiocbp))
-#define __sanitizer_syscall_pre_lio_listio(mode, list, nent, sig)              \
-  __sanitizer_syscall_pre_impl_lio_listio((long long)(mode),                   \
-                                          (long long)(list),                   \
-                                          (long long)(nent), (long long)(sig))
-#define __sanitizer_syscall_post_lio_listio(res, mode, list, nent, sig)        \
-  __sanitizer_syscall_post_impl_lio_listio(                                    \
-      res, (long long)(mode), (long long)(list), (long long)(nent),            \
-      (long long)(sig))
-/* syscall 407 has been skipped */
-/* syscall 408 has been skipped */
-/* syscall 409 has been skipped */
-#define __sanitizer_syscall_pre___mount50(type, path, flags, data, data_len)   \
-  __sanitizer_syscall_pre_impl___mount50(                                      \
-      (long long)(type), (long long)(path), (long long)(flags),                \
-      (long long)(data), (long long)(data_len))
-#define __sanitizer_syscall_post___mount50(res, type, path, flags, data,       \
-                                           data_len)                           \
-  __sanitizer_syscall_post_impl___mount50(                                     \
-      res, (long long)(type), (long long)(path), (long long)(flags),           \
-      (long long)(data), (long long)(data_len))
-#define __sanitizer_syscall_pre_mremap(old_address, old_size, new_address,     \
-                                       new_size, flags)                        \
-  __sanitizer_syscall_pre_impl_mremap(                                         \
-      (long long)(old_address), (long long)(old_size),                         \
-      (long long)(new_address), (long long)(new_size), (long long)(flags))
-#define __sanitizer_syscall_post_mremap(res, old_address, old_size,            \
-                                        new_address, new_size, flags)          \
-  __sanitizer_syscall_post_impl_mremap(                                        \
-      res, (long long)(old_address), (long long)(old_size),                    \
-      (long long)(new_address), (long long)(new_size), (long long)(flags))
-#define __sanitizer_syscall_pre_pset_create(psid)                              \
-  __sanitizer_syscall_pre_impl_pset_create((long long)(psid))
-#define __sanitizer_syscall_post_pset_create(res, psid)                        \
-  __sanitizer_syscall_post_impl_pset_create(res, (long long)(psid))
-#define __sanitizer_syscall_pre_pset_destroy(psid)                             \
-  __sanitizer_syscall_pre_impl_pset_destroy((long long)(psid))
-#define __sanitizer_syscall_post_pset_destroy(res, psid)                       \
-  __sanitizer_syscall_post_impl_pset_destroy(res, (long long)(psid))
-#define __sanitizer_syscall_pre_pset_assign(psid, cpuid, opsid)                \
-  __sanitizer_syscall_pre_impl_pset_assign(                                    \
-      (long long)(psid), (long long)(cpuid), (long long)(opsid))
-#define __sanitizer_syscall_post_pset_assign(res, psid, cpuid, opsid)          \
-  __sanitizer_syscall_post_impl_pset_assign(                                   \
-      res, (long long)(psid), (long long)(cpuid), (long long)(opsid))
-#define __sanitizer_syscall_pre__pset_bind(idtype, first_id, second_id, psid,  \
-                                           opsid)                              \
-  __sanitizer_syscall_pre_impl__pset_bind(                                     \
-      (long long)(idtype), (long long)(first_id), (long long)(second_id),      \
-      (long long)(psid), (long long)(opsid))
-#define __sanitizer_syscall_post__pset_bind(res, idtype, first_id, second_id,  \
-                                            psid, opsid)                       \
-  __sanitizer_syscall_post_impl__pset_bind(                                    \
-      res, (long long)(idtype), (long long)(first_id), (long long)(second_id), \
-      (long long)(psid), (long long)(opsid))
-#define __sanitizer_syscall_pre___posix_fadvise50(fd, PAD, offset, len,        \
-                                                  advice)                      \
-  __sanitizer_syscall_pre_impl___posix_fadvise50(                              \
-      (long long)(fd), (long long)(PAD), (long long)(offset),                  \
-      (long long)(len), (long long)(advice))
-#define __sanitizer_syscall_post___posix_fadvise50(res, fd, PAD, offset, len,  \
-                                                   advice)                     \
-  __sanitizer_syscall_post_impl___posix_fadvise50(                             \
-      res, (long long)(fd), (long long)(PAD), (long long)(offset),             \
-      (long long)(len), (long long)(advice))
-#define __sanitizer_syscall_pre___select50(nd, in, ou, ex, tv)                 \
-  __sanitizer_syscall_pre_impl___select50((long long)(nd), (long long)(in),    \
-                                          (long long)(ou), (long long)(ex),    \
-                                          (long long)(tv))
-#define __sanitizer_syscall_post___select50(res, nd, in, ou, ex, tv)           \
-  __sanitizer_syscall_post_impl___select50(res, (long long)(nd),               \
-                                           (long long)(in), (long long)(ou),   \
-                                           (long long)(ex), (long long)(tv))
-#define __sanitizer_syscall_pre___gettimeofday50(tp, tzp)                      \
-  __sanitizer_syscall_pre_impl___gettimeofday50((long long)(tp),               \
-                                                (long long)(tzp))
-#define __sanitizer_syscall_post___gettimeofday50(res, tp, tzp)                \
-  __sanitizer_syscall_post_impl___gettimeofday50(res, (long long)(tp),         \
-                                                 (long long)(tzp))
-#define __sanitizer_syscall_pre___settimeofday50(tv, tzp)                      \
-  __sanitizer_syscall_pre_impl___settimeofday50((long long)(tv),               \
-                                                (long long)(tzp))
-#define __sanitizer_syscall_post___settimeofday50(res, tv, tzp)                \
-  __sanitizer_syscall_post_impl___settimeofday50(res, (long long)(tv),         \
-                                                 (long long)(tzp))
-#define __sanitizer_syscall_pre___utimes50(path, tptr)                         \
-  __sanitizer_syscall_pre_impl___utimes50((long long)(path), (long long)(tptr))
-#define __sanitizer_syscall_post___utimes50(res, path, tptr)                   \
-  __sanitizer_syscall_post_impl___utimes50(res, (long long)(path),             \
-                                           (long long)(tptr))
-#define __sanitizer_syscall_pre___adjtime50(delta, olddelta)                   \
-  __sanitizer_syscall_pre_impl___adjtime50((long long)(delta),                 \
-                                           (long long)(olddelta))
-#define __sanitizer_syscall_post___adjtime50(res, delta, olddelta)             \
-  __sanitizer_syscall_post_impl___adjtime50(res, (long long)(delta),           \
-                                            (long long)(olddelta))
-#define __sanitizer_syscall_pre___lfs_segwait50(fsidp, tv)                     \
-  __sanitizer_syscall_pre_impl___lfs_segwait50((long long)(fsidp),             \
-                                               (long long)(tv))
-#define __sanitizer_syscall_post___lfs_segwait50(res, fsidp, tv)               \
-  __sanitizer_syscall_post_impl___lfs_segwait50(res, (long long)(fsidp),       \
-                                                (long long)(tv))
-#define __sanitizer_syscall_pre___futimes50(fd, tptr)                          \
-  __sanitizer_syscall_pre_impl___futimes50((long long)(fd), (long long)(tptr))
-#define __sanitizer_syscall_post___futimes50(res, fd, tptr)                    \
-  __sanitizer_syscall_post_impl___futimes50(res, (long long)(fd),              \
-                                            (long long)(tptr))
-#define __sanitizer_syscall_pre___lutimes50(path, tptr)                        \
-  __sanitizer_syscall_pre_impl___lutimes50((long long)(path), (long long)(tptr))
-#define __sanitizer_syscall_post___lutimes50(res, path, tptr)                  \
-  __sanitizer_syscall_post_impl___lutimes50(res, (long long)(path),            \
-                                            (long long)(tptr))
-#define __sanitizer_syscall_pre___setitimer50(which, itv, oitv)                \
-  __sanitizer_syscall_pre_impl___setitimer50(                                  \
-      (long long)(which), (long long)(itv), (long long)(oitv))
-#define __sanitizer_syscall_post___setitimer50(res, which, itv, oitv)          \
-  __sanitizer_syscall_post_impl___setitimer50(                                 \
-      res, (long long)(which), (long long)(itv), (long long)(oitv))
-#define __sanitizer_syscall_pre___getitimer50(which, itv)                      \
-  __sanitizer_syscall_pre_impl___getitimer50((long long)(which),               \
-                                             (long long)(itv))
-#define __sanitizer_syscall_post___getitimer50(res, which, itv)                \
-  __sanitizer_syscall_post_impl___getitimer50(res, (long long)(which),         \
-                                              (long long)(itv))
-#define __sanitizer_syscall_pre___clock_gettime50(clock_id, tp)                \
-  __sanitizer_syscall_pre_impl___clock_gettime50((long long)(clock_id),        \
-                                                 (long long)(tp))
-#define __sanitizer_syscall_post___clock_gettime50(res, clock_id, tp)          \
-  __sanitizer_syscall_post_impl___clock_gettime50(res, (long long)(clock_id),  \
-                                                  (long long)(tp))
-#define __sanitizer_syscall_pre___clock_settime50(clock_id, tp)                \
-  __sanitizer_syscall_pre_impl___clock_settime50((long long)(clock_id),        \
-                                                 (long long)(tp))
-#define __sanitizer_syscall_post___clock_settime50(res, clock_id, tp)          \
-  __sanitizer_syscall_post_impl___clock_settime50(res, (long long)(clock_id),  \
-                                                  (long long)(tp))
-#define __sanitizer_syscall_pre___clock_getres50(clock_id, tp)                 \
-  __sanitizer_syscall_pre_impl___clock_getres50((long long)(clock_id),         \
-                                                (long long)(tp))
-#define __sanitizer_syscall_post___clock_getres50(res, clock_id, tp)           \
-  __sanitizer_syscall_post_impl___clock_getres50(res, (long long)(clock_id),   \
-                                                 (long long)(tp))
-#define __sanitizer_syscall_pre___nanosleep50(rqtp, rmtp)                      \
-  __sanitizer_syscall_pre_impl___nanosleep50((long long)(rqtp),                \
-                                             (long long)(rmtp))
-#define __sanitizer_syscall_post___nanosleep50(res, rqtp, rmtp)                \
-  __sanitizer_syscall_post_impl___nanosleep50(res, (long long)(rqtp),          \
-                                              (long long)(rmtp))
-#define __sanitizer_syscall_pre_____sigtimedwait50(set, info, timeout)         \
-  __sanitizer_syscall_pre_impl_____sigtimedwait50(                             \
-      (long long)(set), (long long)(info), (long long)(timeout))
-#define __sanitizer_syscall_post_____sigtimedwait50(res, set, info, timeout)   \
-  __sanitizer_syscall_post_impl_____sigtimedwait50(                            \
-      res, (long long)(set), (long long)(info), (long long)(timeout))
-#define __sanitizer_syscall_pre___mq_timedsend50(mqdes, msg_ptr, msg_len,      \
-                                                 msg_prio, abs_timeout)        \
-  __sanitizer_syscall_pre_impl___mq_timedsend50(                               \
-      (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len),          \
-      (long long)(msg_prio), (long long)(abs_timeout))
-#define __sanitizer_syscall_post___mq_timedsend50(                             \
-    res, mqdes, msg_ptr, msg_len, msg_prio, abs_timeout)                       \
-  __sanitizer_syscall_post_impl___mq_timedsend50(                              \
-      res, (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len),     \
-      (long long)(msg_prio), (long long)(abs_timeout))
-#define __sanitizer_syscall_pre___mq_timedreceive50(mqdes, msg_ptr, msg_len,   \
-                                                    msg_prio, abs_timeout)     \
-  __sanitizer_syscall_pre_impl___mq_timedreceive50(                            \
-      (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len),          \
-      (long long)(msg_prio), (long long)(abs_timeout))
-#define __sanitizer_syscall_post___mq_timedreceive50(                          \
-    res, mqdes, msg_ptr, msg_len, msg_prio, abs_timeout)                       \
-  __sanitizer_syscall_post_impl___mq_timedreceive50(                           \
-      res, (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len),     \
-      (long long)(msg_prio), (long long)(abs_timeout))
-#define __sanitizer_syscall_pre_compat_60__lwp_park(ts, unpark, hint,          \
-                                                    unparkhint)                \
-  __sanitizer_syscall_pre_impl_compat_60__lwp_park(                            \
-      (long long)(ts), (long long)(unpark), (long long)(hint),                 \
-      (long long)(unparkhint))
-#define __sanitizer_syscall_post_compat_60__lwp_park(res, ts, unpark, hint,    \
-                                                     unparkhint)               \
-  __sanitizer_syscall_post_impl_compat_60__lwp_park(                           \
-      res, (long long)(ts), (long long)(unpark), (long long)(hint),            \
-      (long long)(unparkhint))
-#define __sanitizer_syscall_pre___kevent50(fd, changelist, nchanges,           \
-                                           eventlist, nevents, timeout)        \
-  __sanitizer_syscall_pre_impl___kevent50(                                     \
-      (long long)(fd), (long long)(changelist), (long long)(nchanges),         \
-      (long long)(eventlist), (long long)(nevents), (long long)(timeout))
-#define __sanitizer_syscall_post___kevent50(res, fd, changelist, nchanges,     \
-                                            eventlist, nevents, timeout)       \
-  __sanitizer_syscall_post_impl___kevent50(                                    \
-      res, (long long)(fd), (long long)(changelist), (long long)(nchanges),    \
-      (long long)(eventlist), (long long)(nevents), (long long)(timeout))
-#define __sanitizer_syscall_pre___pselect50(nd, in, ou, ex, ts, mask)          \
-  __sanitizer_syscall_pre_impl___pselect50((long long)(nd), (long long)(in),   \
-                                           (long long)(ou), (long long)(ex),   \
-                                           (long long)(ts), (long long)(mask))
-#define __sanitizer_syscall_post___pselect50(res, nd, in, ou, ex, ts, mask)    \
-  __sanitizer_syscall_post_impl___pselect50(                                   \
-      res, (long long)(nd), (long long)(in), (long long)(ou), (long long)(ex), \
-      (long long)(ts), (long long)(mask))
-#define __sanitizer_syscall_pre___pollts50(fds, nfds, ts, mask)                \
-  __sanitizer_syscall_pre_impl___pollts50((long long)(fds), (long long)(nfds), \
-                                          (long long)(ts), (long long)(mask))
-#define __sanitizer_syscall_post___pollts50(res, fds, nfds, ts, mask)          \
-  __sanitizer_syscall_post_impl___pollts50(res, (long long)(fds),              \
-                                           (long long)(nfds), (long long)(ts), \
-                                           (long long)(mask))
-#define __sanitizer_syscall_pre___aio_suspend50(list, nent, timeout)           \
-  __sanitizer_syscall_pre_impl___aio_suspend50(                                \
-      (long long)(list), (long long)(nent), (long long)(timeout))
-#define __sanitizer_syscall_post___aio_suspend50(res, list, nent, timeout)     \
-  __sanitizer_syscall_post_impl___aio_suspend50(                               \
-      res, (long long)(list), (long long)(nent), (long long)(timeout))
-#define __sanitizer_syscall_pre___stat50(path, ub)                             \
-  __sanitizer_syscall_pre_impl___stat50((long long)(path), (long long)(ub))
-#define __sanitizer_syscall_post___stat50(res, path, ub)                       \
-  __sanitizer_syscall_post_impl___stat50(res, (long long)(path),               \
-                                         (long long)(ub))
-#define __sanitizer_syscall_pre___fstat50(fd, sb)                              \
-  __sanitizer_syscall_pre_impl___fstat50((long long)(fd), (long long)(sb))
-#define __sanitizer_syscall_post___fstat50(res, fd, sb)                        \
-  __sanitizer_syscall_post_impl___fstat50(res, (long long)(fd), (long long)(sb))
-#define __sanitizer_syscall_pre___lstat50(path, ub)                            \
-  __sanitizer_syscall_pre_impl___lstat50((long long)(path), (long long)(ub))
-#define __sanitizer_syscall_post___lstat50(res, path, ub)                      \
-  __sanitizer_syscall_post_impl___lstat50(res, (long long)(path),              \
-                                          (long long)(ub))
-#define __sanitizer_syscall_pre_____semctl50(semid, semnum, cmd, arg)          \
-  __sanitizer_syscall_pre_impl_____semctl50(                                   \
-      (long long)(semid), (long long)(semnum), (long long)(cmd),               \
-      (long long)(arg))
-#define __sanitizer_syscall_post_____semctl50(res, semid, semnum, cmd, arg)    \
-  __sanitizer_syscall_post_impl_____semctl50(                                  \
-      res, (long long)(semid), (long long)(semnum), (long long)(cmd),          \
-      (long long)(arg))
-#define __sanitizer_syscall_pre___shmctl50(shmid, cmd, buf)                    \
-  __sanitizer_syscall_pre_impl___shmctl50((long long)(shmid),                  \
-                                          (long long)(cmd), (long long)(buf))
-#define __sanitizer_syscall_post___shmctl50(res, shmid, cmd, buf)              \
-  __sanitizer_syscall_post_impl___shmctl50(res, (long long)(shmid),            \
-                                           (long long)(cmd), (long long)(buf))
-#define __sanitizer_syscall_pre___msgctl50(msqid, cmd, buf)                    \
-  __sanitizer_syscall_pre_impl___msgctl50((long long)(msqid),                  \
-                                          (long long)(cmd), (long long)(buf))
-#define __sanitizer_syscall_post___msgctl50(res, msqid, cmd, buf)              \
-  __sanitizer_syscall_post_impl___msgctl50(res, (long long)(msqid),            \
-                                           (long long)(cmd), (long long)(buf))
-#define __sanitizer_syscall_pre___getrusage50(who, rusage)                     \
-  __sanitizer_syscall_pre_impl___getrusage50((long long)(who),                 \
-                                             (long long)(rusage))
-#define __sanitizer_syscall_post___getrusage50(res, who, rusage)               \
-  __sanitizer_syscall_post_impl___getrusage50(res, (long long)(who),           \
-                                              (long long)(rusage))
-#define __sanitizer_syscall_pre___timer_settime50(timerid, flags, value,       \
-                                                  ovalue)                      \
-  __sanitizer_syscall_pre_impl___timer_settime50(                              \
-      (long long)(timerid), (long long)(flags), (long long)(value),            \
-      (long long)(ovalue))
-#define __sanitizer_syscall_post___timer_settime50(res, timerid, flags, value, \
-                                                   ovalue)                     \
-  __sanitizer_syscall_post_impl___timer_settime50(                             \
-      res, (long long)(timerid), (long long)(flags), (long long)(value),       \
-      (long long)(ovalue))
-#define __sanitizer_syscall_pre___timer_gettime50(timerid, value)              \
-  __sanitizer_syscall_pre_impl___timer_gettime50((long long)(timerid),         \
-                                                 (long long)(value))
-#define __sanitizer_syscall_post___timer_gettime50(res, timerid, value)        \
-  __sanitizer_syscall_post_impl___timer_gettime50(res, (long long)(timerid),   \
-                                                  (long long)(value))
-#if defined(NTP) || !defined(_KERNEL_OPT)
-#define __sanitizer_syscall_pre___ntp_gettime50(ntvp)                          \
-  __sanitizer_syscall_pre_impl___ntp_gettime50((long long)(ntvp))
-#define __sanitizer_syscall_post___ntp_gettime50(res, ntvp)                    \
-  __sanitizer_syscall_post_impl___ntp_gettime50(res, (long long)(ntvp))
-#else
-/* syscall 448 has been skipped */
-#endif
-#define __sanitizer_syscall_pre___wait450(pid, status, options, rusage)        \
-  __sanitizer_syscall_pre_impl___wait450(                                      \
-      (long long)(pid), (long long)(status), (long long)(options),             \
-      (long long)(rusage))
-#define __sanitizer_syscall_post___wait450(res, pid, status, options, rusage)  \
-  __sanitizer_syscall_post_impl___wait450(                                     \
-      res, (long long)(pid), (long long)(status), (long long)(options),        \
-      (long long)(rusage))
-#define __sanitizer_syscall_pre___mknod50(path, mode, dev)                     \
-  __sanitizer_syscall_pre_impl___mknod50((long long)(path), (long long)(mode), \
-                                         (long long)(dev))
-#define __sanitizer_syscall_post___mknod50(res, path, mode, dev)               \
-  __sanitizer_syscall_post_impl___mknod50(res, (long long)(path),              \
-                                          (long long)(mode), (long long)(dev))
-#define __sanitizer_syscall_pre___fhstat50(fhp, fh_size, sb)                   \
-  __sanitizer_syscall_pre_impl___fhstat50(                                     \
-      (long long)(fhp), (long long)(fh_size), (long long)(sb))
-#define __sanitizer_syscall_post___fhstat50(res, fhp, fh_size, sb)             \
-  __sanitizer_syscall_post_impl___fhstat50(                                    \
-      res, (long long)(fhp), (long long)(fh_size), (long long)(sb))
-/* syscall 452 has been skipped */
-#define __sanitizer_syscall_pre_pipe2(fildes, flags)                           \
-  __sanitizer_syscall_pre_impl_pipe2((long long)(fildes), (long long)(flags))
-#define __sanitizer_syscall_post_pipe2(res, fildes, flags)                     \
-  __sanitizer_syscall_post_impl_pipe2(res, (long long)(fildes),                \
-                                      (long long)(flags))
-#define __sanitizer_syscall_pre_dup3(from, to, flags)                          \
-  __sanitizer_syscall_pre_impl_dup3((long long)(from), (long long)(to),        \
-                                    (long long)(flags))
-#define __sanitizer_syscall_post_dup3(res, from, to, flags)                    \
-  __sanitizer_syscall_post_impl_dup3(res, (long long)(from), (long long)(to),  \
-                                     (long long)(flags))
-#define __sanitizer_syscall_pre_kqueue1(flags)                                 \
-  __sanitizer_syscall_pre_impl_kqueue1((long long)(flags))
-#define __sanitizer_syscall_post_kqueue1(res, flags)                           \
-  __sanitizer_syscall_post_impl_kqueue1(res, (long long)(flags))
-#define __sanitizer_syscall_pre_paccept(s, name, anamelen, mask, flags)        \
-  __sanitizer_syscall_pre_impl_paccept((long long)(s), (long long)(name),      \
-                                       (long long)(anamelen),                  \
-                                       (long long)(mask), (long long)(flags))
-#define __sanitizer_syscall_post_paccept(res, s, name, anamelen, mask, flags)  \
-  __sanitizer_syscall_post_impl_paccept(                                       \
-      res, (long long)(s), (long long)(name), (long long)(anamelen),           \
-      (long long)(mask), (long long)(flags))
-#define __sanitizer_syscall_pre_linkat(fd1, name1, fd2, name2, flags)          \
-  __sanitizer_syscall_pre_impl_linkat((long long)(fd1), (long long)(name1),    \
-                                      (long long)(fd2), (long long)(name2),    \
-                                      (long long)(flags))
-#define __sanitizer_syscall_post_linkat(res, fd1, name1, fd2, name2, flags)    \
-  __sanitizer_syscall_post_impl_linkat(res, (long long)(fd1),                  \
-                                       (long long)(name1), (long long)(fd2),   \
-                                       (long long)(name2), (long long)(flags))
-#define __sanitizer_syscall_pre_renameat(fromfd, from, tofd, to)               \
-  __sanitizer_syscall_pre_impl_renameat((long long)(fromfd),                   \
-                                        (long long)(from), (long long)(tofd),  \
-                                        (long long)(to))
-#define __sanitizer_syscall_post_renameat(res, fromfd, from, tofd, to)         \
-  __sanitizer_syscall_post_impl_renameat(res, (long long)(fromfd),             \
-                                         (long long)(from), (long long)(tofd), \
-                                         (long long)(to))
-#define __sanitizer_syscall_pre_mkfifoat(fd, path, mode)                       \
-  __sanitizer_syscall_pre_impl_mkfifoat((long long)(fd), (long long)(path),    \
-                                        (long long)(mode))
-#define __sanitizer_syscall_post_mkfifoat(res, fd, path, mode)                 \
-  __sanitizer_syscall_post_impl_mkfifoat(res, (long long)(fd),                 \
-                                         (long long)(path), (long long)(mode))
-#define __sanitizer_syscall_pre_mknodat(fd, path, mode, PAD, dev)              \
-  __sanitizer_syscall_pre_impl_mknodat((long long)(fd), (long long)(path),     \
-                                       (long long)(mode), (long long)(PAD),    \
-                                       (long long)(dev))
-#define __sanitizer_syscall_post_mknodat(res, fd, path, mode, PAD, dev)        \
-  __sanitizer_syscall_post_impl_mknodat(res, (long long)(fd),                  \
-                                        (long long)(path), (long long)(mode),  \
-                                        (long long)(PAD), (long long)(dev))
-#define __sanitizer_syscall_pre_mkdirat(fd, path, mode)                        \
-  __sanitizer_syscall_pre_impl_mkdirat((long long)(fd), (long long)(path),     \
-                                       (long long)(mode))
-#define __sanitizer_syscall_post_mkdirat(res, fd, path, mode)                  \
-  __sanitizer_syscall_post_impl_mkdirat(res, (long long)(fd),                  \
-                                        (long long)(path), (long long)(mode))
-#define __sanitizer_syscall_pre_faccessat(fd, path, amode, flag)               \
-  __sanitizer_syscall_pre_impl_faccessat((long long)(fd), (long long)(path),   \
-                                         (long long)(amode),                   \
-                                         (long long)(flag))
-#define __sanitizer_syscall_post_faccessat(res, fd, path, amode, flag)         \
-  __sanitizer_syscall_post_impl_faccessat(                                     \
-      res, (long long)(fd), (long long)(path), (long long)(amode),             \
-      (long long)(flag))
-#define __sanitizer_syscall_pre_fchmodat(fd, path, mode, flag)                 \
-  __sanitizer_syscall_pre_impl_fchmodat((long long)(fd), (long long)(path),    \
-                                        (long long)(mode), (long long)(flag))
-#define __sanitizer_syscall_post_fchmodat(res, fd, path, mode, flag)           \
-  __sanitizer_syscall_post_impl_fchmodat(res, (long long)(fd),                 \
-                                         (long long)(path), (long long)(mode), \
-                                         (long long)(flag))
-#define __sanitizer_syscall_pre_fchownat(fd, path, owner, group, flag)         \
-  __sanitizer_syscall_pre_impl_fchownat((long long)(fd), (long long)(path),    \
-                                        (long long)(owner),                    \
-                                        (long long)(group), (long long)(flag))
-#define __sanitizer_syscall_post_fchownat(res, fd, path, owner, group, flag)   \
-  __sanitizer_syscall_post_impl_fchownat(                                      \
-      res, (long long)(fd), (long long)(path), (long long)(owner),             \
-      (long long)(group), (long long)(flag))
-#define __sanitizer_syscall_pre_fexecve(fd, argp, envp)                        \
-  __sanitizer_syscall_pre_impl_fexecve((long long)(fd), (long long)(argp),     \
-                                       (long long)(envp))
-#define __sanitizer_syscall_post_fexecve(res, fd, argp, envp)                  \
-  __sanitizer_syscall_post_impl_fexecve(res, (long long)(fd),                  \
-                                        (long long)(argp), (long long)(envp))
-#define __sanitizer_syscall_pre_fstatat(fd, path, buf, flag)                   \
-  __sanitizer_syscall_pre_impl_fstatat((long long)(fd), (long long)(path),     \
-                                       (long long)(buf), (long long)(flag))
-#define __sanitizer_syscall_post_fstatat(res, fd, path, buf, flag)             \
-  __sanitizer_syscall_post_impl_fstatat(res, (long long)(fd),                  \
-                                        (long long)(path), (long long)(buf),   \
-                                        (long long)(flag))
-#define __sanitizer_syscall_pre_utimensat(fd, path, tptr, flag)                \
-  __sanitizer_syscall_pre_impl_utimensat((long long)(fd), (long long)(path),   \
-                                         (long long)(tptr), (long long)(flag))
-#define __sanitizer_syscall_post_utimensat(res, fd, path, tptr, flag)          \
-  __sanitizer_syscall_post_impl_utimensat(                                     \
-      res, (long long)(fd), (long long)(path), (long long)(tptr),              \
-      (long long)(flag))
-#define __sanitizer_syscall_pre_openat(fd, path, oflags, mode)                 \
-  __sanitizer_syscall_pre_impl_openat((long long)(fd), (long long)(path),      \
-                                      (long long)(oflags), (long long)(mode))
-#define __sanitizer_syscall_post_openat(res, fd, path, oflags, mode)           \
-  __sanitizer_syscall_post_impl_openat(res, (long long)(fd),                   \
-                                       (long long)(path), (long long)(oflags), \
-                                       (long long)(mode))
-#define __sanitizer_syscall_pre_readlinkat(fd, path, buf, bufsize)             \
-  __sanitizer_syscall_pre_impl_readlinkat((long long)(fd), (long long)(path),  \
-                                          (long long)(buf),                    \
-                                          (long long)(bufsize))
-#define __sanitizer_syscall_post_readlinkat(res, fd, path, buf, bufsize)       \
-  __sanitizer_syscall_post_impl_readlinkat(                                    \
-      res, (long long)(fd), (long long)(path), (long long)(buf),               \
-      (long long)(bufsize))
-#define __sanitizer_syscall_pre_symlinkat(path1, fd, path2)                    \
-  __sanitizer_syscall_pre_impl_symlinkat((long long)(path1), (long long)(fd),  \
-                                         (long long)(path2))
-#define __sanitizer_syscall_post_symlinkat(res, path1, fd, path2)              \
-  __sanitizer_syscall_post_impl_symlinkat(res, (long long)(path1),             \
-                                          (long long)(fd), (long long)(path2))
-#define __sanitizer_syscall_pre_unlinkat(fd, path, flag)                       \
-  __sanitizer_syscall_pre_impl_unlinkat((long long)(fd), (long long)(path),    \
-                                        (long long)(flag))
-#define __sanitizer_syscall_post_unlinkat(res, fd, path, flag)                 \
-  __sanitizer_syscall_post_impl_unlinkat(res, (long long)(fd),                 \
-                                         (long long)(path), (long long)(flag))
-#define __sanitizer_syscall_pre_futimens(fd, tptr)                             \
-  __sanitizer_syscall_pre_impl_futimens((long long)(fd), (long long)(tptr))
-#define __sanitizer_syscall_post_futimens(res, fd, tptr)                       \
-  __sanitizer_syscall_post_impl_futimens(res, (long long)(fd),                 \
-                                         (long long)(tptr))
-#define __sanitizer_syscall_pre___quotactl(path, args)                         \
-  __sanitizer_syscall_pre_impl___quotactl((long long)(path), (long long)(args))
-#define __sanitizer_syscall_post___quotactl(res, path, args)                   \
-  __sanitizer_syscall_post_impl___quotactl(res, (long long)(path),             \
-                                           (long long)(args))
-#define __sanitizer_syscall_pre_posix_spawn(pid, path, file_actions, attrp,    \
-                                            argv, envp)                        \
-  __sanitizer_syscall_pre_impl_posix_spawn(                                    \
-      (long long)(pid), (long long)(path), (long long)(file_actions),          \
-      (long long)(attrp), (long long)(argv), (long long)(envp))
-#define __sanitizer_syscall_post_posix_spawn(res, pid, path, file_actions,     \
-                                             attrp, argv, envp)                \
-  __sanitizer_syscall_post_impl_posix_spawn(                                   \
-      res, (long long)(pid), (long long)(path), (long long)(file_actions),     \
-      (long long)(attrp), (long long)(argv), (long long)(envp))
-#define __sanitizer_syscall_pre_recvmmsg(s, mmsg, vlen, flags, timeout)        \
-  __sanitizer_syscall_pre_impl_recvmmsg((long long)(s), (long long)(mmsg),     \
-                                        (long long)(vlen), (long long)(flags), \
-                                        (long long)(timeout))
-#define __sanitizer_syscall_post_recvmmsg(res, s, mmsg, vlen, flags, timeout)  \
-  __sanitizer_syscall_post_impl_recvmmsg(                                      \
-      res, (long long)(s), (long long)(mmsg), (long long)(vlen),               \
-      (long long)(flags), (long long)(timeout))
-#define __sanitizer_syscall_pre_sendmmsg(s, mmsg, vlen, flags)                 \
-  __sanitizer_syscall_pre_impl_sendmmsg((long long)(s), (long long)(mmsg),     \
-                                        (long long)(vlen), (long long)(flags))
-#define __sanitizer_syscall_post_sendmmsg(res, s, mmsg, vlen, flags)           \
-  __sanitizer_syscall_post_impl_sendmmsg(res, (long long)(s),                  \
-                                         (long long)(mmsg), (long long)(vlen), \
-                                         (long long)(flags))
-#define __sanitizer_syscall_pre_clock_nanosleep(clock_id, flags, rqtp, rmtp)   \
-  __sanitizer_syscall_pre_impl_clock_nanosleep(                                \
-      (long long)(clock_id), (long long)(flags), (long long)(rqtp),            \
-      (long long)(rmtp))
-#define __sanitizer_syscall_post_clock_nanosleep(res, clock_id, flags, rqtp,   \
-                                                 rmtp)                         \
-  __sanitizer_syscall_post_impl_clock_nanosleep(                               \
-      res, (long long)(clock_id), (long long)(flags), (long long)(rqtp),       \
-      (long long)(rmtp))
-#define __sanitizer_syscall_pre____lwp_park60(clock_id, flags, ts, unpark,     \
-                                              hint, unparkhint)                \
-  __sanitizer_syscall_pre_impl____lwp_park60(                                  \
-      (long long)(clock_id), (long long)(flags), (long long)(ts),              \
-      (long long)(unpark), (long long)(hint), (long long)(unparkhint))
-#define __sanitizer_syscall_post____lwp_park60(res, clock_id, flags, ts,       \
-                                               unpark, hint, unparkhint)       \
-  __sanitizer_syscall_post_impl____lwp_park60(                                 \
-      res, (long long)(clock_id), (long long)(flags), (long long)(ts),         \
-      (long long)(unpark), (long long)(hint), (long long)(unparkhint))
-#define __sanitizer_syscall_pre_posix_fallocate(fd, PAD, pos, len)             \
-  __sanitizer_syscall_pre_impl_posix_fallocate(                                \
-      (long long)(fd), (long long)(PAD), (long long)(pos), (long long)(len))
-#define __sanitizer_syscall_post_posix_fallocate(res, fd, PAD, pos, len)       \
-  __sanitizer_syscall_post_impl_posix_fallocate(                               \
-      res, (long long)(fd), (long long)(PAD), (long long)(pos),                \
-      (long long)(len))
-#define __sanitizer_syscall_pre_fdiscard(fd, PAD, pos, len)                    \
-  __sanitizer_syscall_pre_impl_fdiscard((long long)(fd), (long long)(PAD),     \
-                                        (long long)(pos), (long long)(len))
-#define __sanitizer_syscall_post_fdiscard(res, fd, PAD, pos, len)              \
-  __sanitizer_syscall_post_impl_fdiscard(res, (long long)(fd),                 \
-                                         (long long)(PAD), (long long)(pos),   \
-                                         (long long)(len))
-#define __sanitizer_syscall_pre_wait6(idtype, id, status, options, wru, info)  \
-  __sanitizer_syscall_pre_impl_wait6(                                          \
-      (long long)(idtype), (long long)(id), (long long)(status),               \
-      (long long)(options), (long long)(wru), (long long)(info))
-#define __sanitizer_syscall_post_wait6(res, idtype, id, status, options, wru,  \
-                                       info)                                   \
-  __sanitizer_syscall_post_impl_wait6(                                         \
-      res, (long long)(idtype), (long long)(id), (long long)(status),          \
-      (long long)(options), (long long)(wru), (long long)(info))
-#define __sanitizer_syscall_pre_clock_getcpuclockid2(idtype, id, clock_id)     \
-  __sanitizer_syscall_pre_impl_clock_getcpuclockid2(                           \
-      (long long)(idtype), (long long)(id), (long long)(clock_id))
-#define __sanitizer_syscall_post_clock_getcpuclockid2(res, idtype, id,         \
-                                                      clock_id)                \
-  __sanitizer_syscall_post_impl_clock_getcpuclockid2(                          \
-      res, (long long)(idtype), (long long)(id), (long long)(clock_id))
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-// Private declarations. Do not call directly from user code. Use macros above.
-
-// DO NOT EDIT! THIS FILE HAS BEEN GENERATED!
-
-void __sanitizer_syscall_pre_impl_syscall(long long code, long long arg0,
-                                          long long arg1, long long arg2,
-                                          long long arg3, long long arg4,
-                                          long long arg5, long long arg6,
-                                          long long arg7);
-void __sanitizer_syscall_post_impl_syscall(long long res, long long code,
-                                           long long arg0, long long arg1,
-                                           long long arg2, long long arg3,
-                                           long long arg4, long long arg5,
-                                           long long arg6, long long arg7);
-void __sanitizer_syscall_pre_impl_exit(long long rval);
-void __sanitizer_syscall_post_impl_exit(long long res, long long rval);
-void __sanitizer_syscall_pre_impl_fork(void);
-void __sanitizer_syscall_post_impl_fork(long long res);
-void __sanitizer_syscall_pre_impl_read(long long fd, long long buf,
-                                       long long nbyte);
-void __sanitizer_syscall_post_impl_read(long long res, long long fd,
-                                        long long buf, long long nbyte);
-void __sanitizer_syscall_pre_impl_write(long long fd, long long buf,
-                                        long long nbyte);
-void __sanitizer_syscall_post_impl_write(long long res, long long fd,
-                                         long long buf, long long nbyte);
-void __sanitizer_syscall_pre_impl_open(long long path, long long flags,
-                                       long long mode);
-void __sanitizer_syscall_post_impl_open(long long res, long long path,
-                                        long long flags, long long mode);
-void __sanitizer_syscall_pre_impl_close(long long fd);
-void __sanitizer_syscall_post_impl_close(long long res, long long fd);
-void __sanitizer_syscall_pre_impl_compat_50_wait4(long long pid,
-                                                  long long status,
-                                                  long long options,
-                                                  long long rusage);
-void __sanitizer_syscall_post_impl_compat_50_wait4(long long res, long long pid,
-                                                   long long status,
-                                                   long long options,
-                                                   long long rusage);
-void __sanitizer_syscall_pre_impl_compat_43_ocreat(long long path,
-                                                   long long mode);
-void __sanitizer_syscall_post_impl_compat_43_ocreat(long long res,
-                                                    long long path,
-                                                    long long mode);
-void __sanitizer_syscall_pre_impl_link(long long path, long long link);
-void __sanitizer_syscall_post_impl_link(long long res, long long path,
-                                        long long link);
-void __sanitizer_syscall_pre_impl_unlink(long long path);
-void __sanitizer_syscall_post_impl_unlink(long long res, long long path);
-/* syscall 11 has been skipped */
-void __sanitizer_syscall_pre_impl_chdir(long long path);
-void __sanitizer_syscall_post_impl_chdir(long long res, long long path);
-void __sanitizer_syscall_pre_impl_fchdir(long long fd);
-void __sanitizer_syscall_post_impl_fchdir(long long res, long long fd);
-void __sanitizer_syscall_pre_impl_compat_50_mknod(long long path,
-                                                  long long mode,
-                                                  long long dev);
-void __sanitizer_syscall_post_impl_compat_50_mknod(long long res,
-                                                   long long path,
-                                                   long long mode,
-                                                   long long dev);
-void __sanitizer_syscall_pre_impl_chmod(long long path, long long mode);
-void __sanitizer_syscall_post_impl_chmod(long long res, long long path,
-                                         long long mode);
-void __sanitizer_syscall_pre_impl_chown(long long path, long long uid,
-                                        long long gid);
-void __sanitizer_syscall_post_impl_chown(long long res, long long path,
-                                         long long uid, long long gid);
-void __sanitizer_syscall_pre_impl_break(long long nsize);
-void __sanitizer_syscall_post_impl_break(long long res, long long nsize);
-void __sanitizer_syscall_pre_impl_compat_20_getfsstat(long long buf,
-                                                      long long bufsize,
-                                                      long long flags);
-void __sanitizer_syscall_post_impl_compat_20_getfsstat(long long res,
-                                                       long long buf,
-                                                       long long bufsize,
-                                                       long long flags);
-void __sanitizer_syscall_pre_impl_compat_43_olseek(long long fd,
-                                                   long long offset,
-                                                   long long whence);
-void __sanitizer_syscall_post_impl_compat_43_olseek(long long res, long long fd,
-                                                    long long offset,
-                                                    long long whence);
-void __sanitizer_syscall_pre_impl_getpid(void);
-void __sanitizer_syscall_post_impl_getpid(long long res);
-void __sanitizer_syscall_pre_impl_compat_40_mount(long long type,
-                                                  long long path,
-                                                  long long flags,
-                                                  long long data);
-void __sanitizer_syscall_post_impl_compat_40_mount(long long res,
-                                                   long long type,
-                                                   long long path,
-                                                   long long flags,
-                                                   long long data);
-void __sanitizer_syscall_pre_impl_unmount(long long path, long long flags);
-void __sanitizer_syscall_post_impl_unmount(long long res, long long path,
-                                           long long flags);
-void __sanitizer_syscall_pre_impl_setuid(long long uid);
-void __sanitizer_syscall_post_impl_setuid(long long res, long long uid);
-void __sanitizer_syscall_pre_impl_getuid(void);
-void __sanitizer_syscall_post_impl_getuid(long long res);
-void __sanitizer_syscall_pre_impl_geteuid(void);
-void __sanitizer_syscall_post_impl_geteuid(long long res);
-void __sanitizer_syscall_pre_impl_ptrace(long long req, long long pid,
-                                         long long addr, long long data);
-void __sanitizer_syscall_post_impl_ptrace(long long res, long long req,
-                                          long long pid, long long addr,
-                                          long long data);
-void __sanitizer_syscall_pre_impl_recvmsg(long long s, long long msg,
-                                          long long flags);
-void __sanitizer_syscall_post_impl_recvmsg(long long res, long long s,
-                                           long long msg, long long flags);
-void __sanitizer_syscall_pre_impl_sendmsg(long long s, long long msg,
-                                          long long flags);
-void __sanitizer_syscall_post_impl_sendmsg(long long res, long long s,
-                                           long long msg, long long flags);
-void __sanitizer_syscall_pre_impl_recvfrom(long long s, long long buf,
-                                           long long len, long long flags,
-                                           long long from,
-                                           long long fromlenaddr);
-void __sanitizer_syscall_post_impl_recvfrom(long long res, long long s,
-                                            long long buf, long long len,
-                                            long long flags, long long from,
-                                            long long fromlenaddr);
-void __sanitizer_syscall_pre_impl_accept(long long s, long long name,
-                                         long long anamelen);
-void __sanitizer_syscall_post_impl_accept(long long res, long long s,
-                                          long long name, long long anamelen);
-void __sanitizer_syscall_pre_impl_getpeername(long long fdes, long long asa,
-                                              long long alen);
-void __sanitizer_syscall_post_impl_getpeername(long long res, long long fdes,
-                                               long long asa, long long alen);
-void __sanitizer_syscall_pre_impl_getsockname(long long fdes, long long asa,
-                                              long long alen);
-void __sanitizer_syscall_post_impl_getsockname(long long res, long long fdes,
-                                               long long asa, long long alen);
-void __sanitizer_syscall_pre_impl_access(long long path, long long flags);
-void __sanitizer_syscall_post_impl_access(long long res, long long path,
-                                          long long flags);
-void __sanitizer_syscall_pre_impl_chflags(long long path, long long flags);
-void __sanitizer_syscall_post_impl_chflags(long long res, long long path,
-                                           long long flags);
-void __sanitizer_syscall_pre_impl_fchflags(long long fd, long long flags);
-void __sanitizer_syscall_post_impl_fchflags(long long res, long long fd,
-                                            long long flags);
-void __sanitizer_syscall_pre_impl_sync(void);
-void __sanitizer_syscall_post_impl_sync(long long res);
-void __sanitizer_syscall_pre_impl_kill(long long pid, long long signum);
-void __sanitizer_syscall_post_impl_kill(long long res, long long pid,
-                                        long long signum);
-void __sanitizer_syscall_pre_impl_compat_43_stat43(long long path,
-                                                   long long ub);
-void __sanitizer_syscall_post_impl_compat_43_stat43(long long res,
-                                                    long long path,
-                                                    long long ub);
-void __sanitizer_syscall_pre_impl_getppid(void);
-void __sanitizer_syscall_post_impl_getppid(long long res);
-void __sanitizer_syscall_pre_impl_compat_43_lstat43(long long path,
-                                                    long long ub);
-void __sanitizer_syscall_post_impl_compat_43_lstat43(long long res,
-                                                     long long path,
-                                                     long long ub);
-void __sanitizer_syscall_pre_impl_dup(long long fd);
-void __sanitizer_syscall_post_impl_dup(long long res, long long fd);
-void __sanitizer_syscall_pre_impl_pipe(void);
-void __sanitizer_syscall_post_impl_pipe(long long res);
-void __sanitizer_syscall_pre_impl_getegid(void);
-void __sanitizer_syscall_post_impl_getegid(long long res);
-void __sanitizer_syscall_pre_impl_profil(long long samples, long long size,
-                                         long long offset, long long scale);
-void __sanitizer_syscall_post_impl_profil(long long res, long long samples,
-                                          long long size, long long offset,
-                                          long long scale);
-void __sanitizer_syscall_pre_impl_ktrace(long long fname, long long ops,
-                                         long long facs, long long pid);
-void __sanitizer_syscall_post_impl_ktrace(long long res, long long fname,
-                                          long long ops, long long facs,
-                                          long long pid);
-void __sanitizer_syscall_pre_impl_compat_13_sigaction13(long long signum,
-                                                        long long nsa,
-                                                        long long osa);
-void __sanitizer_syscall_post_impl_compat_13_sigaction13(long long res,
-                                                         long long signum,
-                                                         long long nsa,
-                                                         long long osa);
-void __sanitizer_syscall_pre_impl_getgid(void);
-void __sanitizer_syscall_post_impl_getgid(long long res);
-void __sanitizer_syscall_pre_impl_compat_13_sigprocmask13(long long how,
-                                                          long long mask);
-void __sanitizer_syscall_post_impl_compat_13_sigprocmask13(long long res,
-                                                           long long how,
-                                                           long long mask);
-void __sanitizer_syscall_pre_impl___getlogin(long long namebuf,
-                                             long long namelen);
-void __sanitizer_syscall_post_impl___getlogin(long long res, long long namebuf,
-                                              long long namelen);
-void __sanitizer_syscall_pre_impl___setlogin(long long namebuf);
-void __sanitizer_syscall_post_impl___setlogin(long long res, long long namebuf);
-void __sanitizer_syscall_pre_impl_acct(long long path);
-void __sanitizer_syscall_post_impl_acct(long long res, long long path);
-void __sanitizer_syscall_pre_impl_compat_13_sigpending13(void);
-void __sanitizer_syscall_post_impl_compat_13_sigpending13(long long res);
-void __sanitizer_syscall_pre_impl_compat_13_sigaltstack13(long long nss,
-                                                          long long oss);
-void __sanitizer_syscall_post_impl_compat_13_sigaltstack13(long long res,
-                                                           long long nss,
-                                                           long long oss);
-void __sanitizer_syscall_pre_impl_ioctl(long long fd, long long com,
-                                        long long data);
-void __sanitizer_syscall_post_impl_ioctl(long long res, long long fd,
-                                         long long com, long long data);
-void __sanitizer_syscall_pre_impl_compat_12_oreboot(long long opt);
-void __sanitizer_syscall_post_impl_compat_12_oreboot(long long res,
-                                                     long long opt);
-void __sanitizer_syscall_pre_impl_revoke(long long path);
-void __sanitizer_syscall_post_impl_revoke(long long res, long long path);
-void __sanitizer_syscall_pre_impl_symlink(long long path, long long link);
-void __sanitizer_syscall_post_impl_symlink(long long res, long long path,
-                                           long long link);
-void __sanitizer_syscall_pre_impl_readlink(long long path, long long buf,
-                                           long long count);
-void __sanitizer_syscall_post_impl_readlink(long long res, long long path,
-                                            long long buf, long long count);
-void __sanitizer_syscall_pre_impl_execve(long long path, long long argp,
-                                         long long envp);
-void __sanitizer_syscall_post_impl_execve(long long res, long long path,
-                                          long long argp, long long envp);
-void __sanitizer_syscall_pre_impl_umask(long long newmask);
-void __sanitizer_syscall_post_impl_umask(long long res, long long newmask);
-void __sanitizer_syscall_pre_impl_chroot(long long path);
-void __sanitizer_syscall_post_impl_chroot(long long res, long long path);
-void __sanitizer_syscall_pre_impl_compat_43_fstat43(long long fd, long long sb);
-void __sanitizer_syscall_post_impl_compat_43_fstat43(long long res,
-                                                     long long fd,
-                                                     long long sb);
-void __sanitizer_syscall_pre_impl_compat_43_ogetkerninfo(long long op,
-                                                         long long where,
-                                                         long long size,
-                                                         long long arg);
-void __sanitizer_syscall_post_impl_compat_43_ogetkerninfo(long long res,
-                                                          long long op,
-                                                          long long where,
-                                                          long long size,
-                                                          long long arg);
-void __sanitizer_syscall_pre_impl_compat_43_ogetpagesize(void);
-void __sanitizer_syscall_post_impl_compat_43_ogetpagesize(long long res);
-void __sanitizer_syscall_pre_impl_compat_12_msync(long long addr,
-                                                  long long len);
-void __sanitizer_syscall_post_impl_compat_12_msync(long long res,
-                                                   long long addr,
-                                                   long long len);
-void __sanitizer_syscall_pre_impl_vfork(void);
-void __sanitizer_syscall_post_impl_vfork(long long res);
-/* syscall 67 has been skipped */
-/* syscall 68 has been skipped */
-/* syscall 69 has been skipped */
-/* syscall 70 has been skipped */
-void __sanitizer_syscall_pre_impl_compat_43_ommap(long long addr, long long len,
-                                                  long long prot,
-                                                  long long flags, long long fd,
-                                                  long long pos);
-void __sanitizer_syscall_post_impl_compat_43_ommap(
-    long long res, long long addr, long long len, long long prot,
-    long long flags, long long fd, long long pos);
-void __sanitizer_syscall_pre_impl_vadvise(long long anom);
-void __sanitizer_syscall_post_impl_vadvise(long long res, long long anom);
-void __sanitizer_syscall_pre_impl_munmap(long long addr, long long len);
-void __sanitizer_syscall_post_impl_munmap(long long res, long long addr,
-                                          long long len);
-void __sanitizer_syscall_pre_impl_mprotect(long long addr, long long len,
-                                           long long prot);
-void __sanitizer_syscall_post_impl_mprotect(long long res, long long addr,
-                                            long long len, long long prot);
-void __sanitizer_syscall_pre_impl_madvise(long long addr, long long len,
-                                          long long behav);
-void __sanitizer_syscall_post_impl_madvise(long long res, long long addr,
-                                           long long len, long long behav);
-/* syscall 76 has been skipped */
-/* syscall 77 has been skipped */
-void __sanitizer_syscall_pre_impl_mincore(long long addr, long long len,
-                                          long long vec);
-void __sanitizer_syscall_post_impl_mincore(long long res, long long addr,
-                                           long long len, long long vec);
-void __sanitizer_syscall_pre_impl_getgroups(long long gidsetsize,
-                                            long long gidset);
-void __sanitizer_syscall_post_impl_getgroups(long long res,
-                                             long long gidsetsize,
-                                             long long gidset);
-void __sanitizer_syscall_pre_impl_setgroups(long long gidsetsize,
-                                            long long gidset);
-void __sanitizer_syscall_post_impl_setgroups(long long res,
-                                             long long gidsetsize,
-                                             long long gidset);
-void __sanitizer_syscall_pre_impl_getpgrp(void);
-void __sanitizer_syscall_post_impl_getpgrp(long long res);
-void __sanitizer_syscall_pre_impl_setpgid(long long pid, long long pgid);
-void __sanitizer_syscall_post_impl_setpgid(long long res, long long pid,
-                                           long long pgid);
-void __sanitizer_syscall_pre_impl_compat_50_setitimer(long long which,
-                                                      long long itv,
-                                                      long long oitv);
-void __sanitizer_syscall_post_impl_compat_50_setitimer(long long res,
-                                                       long long which,
-                                                       long long itv,
-                                                       long long oitv);
-void __sanitizer_syscall_pre_impl_compat_43_owait(void);
-void __sanitizer_syscall_post_impl_compat_43_owait(long long res);
-void __sanitizer_syscall_pre_impl_compat_12_oswapon(long long name);
-void __sanitizer_syscall_post_impl_compat_12_oswapon(long long res,
-                                                     long long name);
-void __sanitizer_syscall_pre_impl_compat_50_getitimer(long long which,
-                                                      long long itv);
-void __sanitizer_syscall_post_impl_compat_50_getitimer(long long res,
-                                                       long long which,
-                                                       long long itv);
-void __sanitizer_syscall_pre_impl_compat_43_ogethostname(long long hostname,
-                                                         long long len);
-void __sanitizer_syscall_post_impl_compat_43_ogethostname(long long res,
-                                                          long long hostname,
-                                                          long long len);
-void __sanitizer_syscall_pre_impl_compat_43_osethostname(long long hostname,
-                                                         long long len);
-void __sanitizer_syscall_post_impl_compat_43_osethostname(long long res,
-                                                          long long hostname,
-                                                          long long len);
-void __sanitizer_syscall_pre_impl_compat_43_ogetdtablesize(void);
-void __sanitizer_syscall_post_impl_compat_43_ogetdtablesize(long long res);
-void __sanitizer_syscall_pre_impl_dup2(long long from, long long to);
-void __sanitizer_syscall_post_impl_dup2(long long res, long long from,
-                                        long long to);
-/* syscall 91 has been skipped */
-void __sanitizer_syscall_pre_impl_fcntl(long long fd, long long cmd,
-                                        long long arg);
-void __sanitizer_syscall_post_impl_fcntl(long long res, long long fd,
-                                         long long cmd, long long arg);
-void __sanitizer_syscall_pre_impl_compat_50_select(long long nd, long long in,
-                                                   long long ou, long long ex,
-                                                   long long tv);
-void __sanitizer_syscall_post_impl_compat_50_select(long long res, long long nd,
-                                                    long long in, long long ou,
-                                                    long long ex, long long tv);
-/* syscall 94 has been skipped */
-void __sanitizer_syscall_pre_impl_fsync(long long fd);
-void __sanitizer_syscall_post_impl_fsync(long long res, long long fd);
-void __sanitizer_syscall_pre_impl_setpriority(long long which, long long who,
-                                              long long prio);
-void __sanitizer_syscall_post_impl_setpriority(long long res, long long which,
-                                               long long who, long long prio);
-void __sanitizer_syscall_pre_impl_compat_30_socket(long long domain,
-                                                   long long type,
-                                                   long long protocol);
-void __sanitizer_syscall_post_impl_compat_30_socket(long long res,
-                                                    long long domain,
-                                                    long long type,
-                                                    long long protocol);
-void __sanitizer_syscall_pre_impl_connect(long long s, long long name,
-                                          long long namelen);
-void __sanitizer_syscall_post_impl_connect(long long res, long long s,
-                                           long long name, long long namelen);
-void __sanitizer_syscall_pre_impl_compat_43_oaccept(long long s, long long name,
-                                                    long long anamelen);
-void __sanitizer_syscall_post_impl_compat_43_oaccept(long long res, long long s,
-                                                     long long name,
-                                                     long long anamelen);
-void __sanitizer_syscall_pre_impl_getpriority(long long which, long long who);
-void __sanitizer_syscall_post_impl_getpriority(long long res, long long which,
-                                               long long who);
-void __sanitizer_syscall_pre_impl_compat_43_osend(long long s, long long buf,
-                                                  long long len,
-                                                  long long flags);
-void __sanitizer_syscall_post_impl_compat_43_osend(long long res, long long s,
-                                                   long long buf, long long len,
-                                                   long long flags);
-void __sanitizer_syscall_pre_impl_compat_43_orecv(long long s, long long buf,
-                                                  long long len,
-                                                  long long flags);
-void __sanitizer_syscall_post_impl_compat_43_orecv(long long res, long long s,
-                                                   long long buf, long long len,
-                                                   long long flags);
-void __sanitizer_syscall_pre_impl_compat_13_sigreturn13(long long sigcntxp);
-void __sanitizer_syscall_post_impl_compat_13_sigreturn13(long long res,
-                                                         long long sigcntxp);
-void __sanitizer_syscall_pre_impl_bind(long long s, long long name,
-                                       long long namelen);
-void __sanitizer_syscall_post_impl_bind(long long res, long long s,
-                                        long long name, long long namelen);
-void __sanitizer_syscall_pre_impl_setsockopt(long long s, long long level,
-                                             long long name, long long val,
-                                             long long valsize);
-void __sanitizer_syscall_post_impl_setsockopt(long long res, long long s,
-                                              long long level, long long name,
-                                              long long val, long long valsize);
-void __sanitizer_syscall_pre_impl_listen(long long s, long long backlog);
-void __sanitizer_syscall_post_impl_listen(long long res, long long s,
-                                          long long backlog);
-/* syscall 107 has been skipped */
-void __sanitizer_syscall_pre_impl_compat_43_osigvec(long long signum,
-                                                    long long nsv,
-                                                    long long osv);
-void __sanitizer_syscall_post_impl_compat_43_osigvec(long long res,
-                                                     long long signum,
-                                                     long long nsv,
-                                                     long long osv);
-void __sanitizer_syscall_pre_impl_compat_43_osigblock(long long mask);
-void __sanitizer_syscall_post_impl_compat_43_osigblock(long long res,
-                                                       long long mask);
-void __sanitizer_syscall_pre_impl_compat_43_osigsetmask(long long mask);
-void __sanitizer_syscall_post_impl_compat_43_osigsetmask(long long res,
-                                                         long long mask);
-void __sanitizer_syscall_pre_impl_compat_13_sigsuspend13(long long mask);
-void __sanitizer_syscall_post_impl_compat_13_sigsuspend13(long long res,
-                                                          long long mask);
-void __sanitizer_syscall_pre_impl_compat_43_osigstack(long long nss,
-                                                      long long oss);
-void __sanitizer_syscall_post_impl_compat_43_osigstack(long long res,
-                                                       long long nss,
-                                                       long long oss);
-void __sanitizer_syscall_pre_impl_compat_43_orecvmsg(long long s, long long msg,
-                                                     long long flags);
-void __sanitizer_syscall_post_impl_compat_43_orecvmsg(long long res,
-                                                      long long s,
-                                                      long long msg,
-                                                      long long flags);
-void __sanitizer_syscall_pre_impl_compat_43_osendmsg(long long s, long long msg,
-                                                     long long flags);
-void __sanitizer_syscall_post_impl_compat_43_osendmsg(long long res,
-                                                      long long s,
-                                                      long long msg,
-                                                      long long flags);
-/* syscall 115 has been skipped */
-void __sanitizer_syscall_pre_impl_compat_50_gettimeofday(long long tp,
-                                                         long long tzp);
-void __sanitizer_syscall_post_impl_compat_50_gettimeofday(long long res,
-                                                          long long tp,
-                                                          long long tzp);
-void __sanitizer_syscall_pre_impl_compat_50_getrusage(long long who,
-                                                      long long rusage);
-void __sanitizer_syscall_post_impl_compat_50_getrusage(long long res,
-                                                       long long who,
-                                                       long long rusage);
-void __sanitizer_syscall_pre_impl_getsockopt(long long s, long long level,
-                                             long long name, long long val,
-                                             long long avalsize);
-void __sanitizer_syscall_post_impl_getsockopt(long long res, long long s,
-                                              long long level, long long name,
-                                              long long val,
-                                              long long avalsize);
-/* syscall 119 has been skipped */
-void __sanitizer_syscall_pre_impl_readv(long long fd, long long iovp,
-                                        long long iovcnt);
-void __sanitizer_syscall_post_impl_readv(long long res, long long fd,
-                                         long long iovp, long long iovcnt);
-void __sanitizer_syscall_pre_impl_writev(long long fd, long long iovp,
-                                         long long iovcnt);
-void __sanitizer_syscall_post_impl_writev(long long res, long long fd,
-                                          long long iovp, long long iovcnt);
-void __sanitizer_syscall_pre_impl_compat_50_settimeofday(long long tv,
-                                                         long long tzp);
-void __sanitizer_syscall_post_impl_compat_50_settimeofday(long long res,
-                                                          long long tv,
-                                                          long long tzp);
-void __sanitizer_syscall_pre_impl_fchown(long long fd, long long uid,
-                                         long long gid);
-void __sanitizer_syscall_post_impl_fchown(long long res, long long fd,
-                                          long long uid, long long gid);
-void __sanitizer_syscall_pre_impl_fchmod(long long fd, long long mode);
-void __sanitizer_syscall_post_impl_fchmod(long long res, long long fd,
-                                          long long mode);
-void __sanitizer_syscall_pre_impl_compat_43_orecvfrom(
-    long long s, long long buf, long long len, long long flags, long long from,
-    long long fromlenaddr);
-void __sanitizer_syscall_post_impl_compat_43_orecvfrom(
-    long long res, long long s, long long buf, long long len, long long flags,
-    long long from, long long fromlenaddr);
-void __sanitizer_syscall_pre_impl_setreuid(long long ruid, long long euid);
-void __sanitizer_syscall_post_impl_setreuid(long long res, long long ruid,
-                                            long long euid);
-void __sanitizer_syscall_pre_impl_setregid(long long rgid, long long egid);
-void __sanitizer_syscall_post_impl_setregid(long long res, long long rgid,
-                                            long long egid);
-void __sanitizer_syscall_pre_impl_rename(long long from, long long to);
-void __sanitizer_syscall_post_impl_rename(long long res, long long from,
-                                          long long to);
-void __sanitizer_syscall_pre_impl_compat_43_otruncate(long long path,
-                                                      long long length);
-void __sanitizer_syscall_post_impl_compat_43_otruncate(long long res,
-                                                       long long path,
-                                                       long long length);
-void __sanitizer_syscall_pre_impl_compat_43_oftruncate(long long fd,
-                                                       long long length);
-void __sanitizer_syscall_post_impl_compat_43_oftruncate(long long res,
-                                                        long long fd,
-                                                        long long length);
-void __sanitizer_syscall_pre_impl_flock(long long fd, long long how);
-void __sanitizer_syscall_post_impl_flock(long long res, long long fd,
-                                         long long how);
-void __sanitizer_syscall_pre_impl_mkfifo(long long path, long long mode);
-void __sanitizer_syscall_post_impl_mkfifo(long long res, long long path,
-                                          long long mode);
-void __sanitizer_syscall_pre_impl_sendto(long long s, long long buf,
-                                         long long len, long long flags,
-                                         long long to, long long tolen);
-void __sanitizer_syscall_post_impl_sendto(long long res, long long s,
-                                          long long buf, long long len,
-                                          long long flags, long long to,
-                                          long long tolen);
-void __sanitizer_syscall_pre_impl_shutdown(long long s, long long how);
-void __sanitizer_syscall_post_impl_shutdown(long long res, long long s,
-                                            long long how);
-void __sanitizer_syscall_pre_impl_socketpair(long long domain, long long type,
-                                             long long protocol, long long rsv);
-void __sanitizer_syscall_post_impl_socketpair(long long res, long long domain,
-                                              long long type,
-                                              long long protocol,
-                                              long long rsv);
-void __sanitizer_syscall_pre_impl_mkdir(long long path, long long mode);
-void __sanitizer_syscall_post_impl_mkdir(long long res, long long path,
-                                         long long mode);
-void __sanitizer_syscall_pre_impl_rmdir(long long path);
-void __sanitizer_syscall_post_impl_rmdir(long long res, long long path);
-void __sanitizer_syscall_pre_impl_compat_50_utimes(long long path,
-                                                   long long tptr);
-void __sanitizer_syscall_post_impl_compat_50_utimes(long long res,
-                                                    long long path,
-                                                    long long tptr);
-/* syscall 139 has been skipped */
-void __sanitizer_syscall_pre_impl_compat_50_adjtime(long long delta,
-                                                    long long olddelta);
-void __sanitizer_syscall_post_impl_compat_50_adjtime(long long res,
-                                                     long long delta,
-                                                     long long olddelta);
-void __sanitizer_syscall_pre_impl_compat_43_ogetpeername(long long fdes,
-                                                         long long asa,
-                                                         long long alen);
-void __sanitizer_syscall_post_impl_compat_43_ogetpeername(long long res,
-                                                          long long fdes,
-                                                          long long asa,
-                                                          long long alen);
-void __sanitizer_syscall_pre_impl_compat_43_ogethostid(void);
-void __sanitizer_syscall_post_impl_compat_43_ogethostid(long long res);
-void __sanitizer_syscall_pre_impl_compat_43_osethostid(long long hostid);
-void __sanitizer_syscall_post_impl_compat_43_osethostid(long long res,
-                                                        long long hostid);
-void __sanitizer_syscall_pre_impl_compat_43_ogetrlimit(long long which,
-                                                       long long rlp);
-void __sanitizer_syscall_post_impl_compat_43_ogetrlimit(long long res,
-                                                        long long which,
-                                                        long long rlp);
-void __sanitizer_syscall_pre_impl_compat_43_osetrlimit(long long which,
-                                                       long long rlp);
-void __sanitizer_syscall_post_impl_compat_43_osetrlimit(long long res,
-                                                        long long which,
-                                                        long long rlp);
-void __sanitizer_syscall_pre_impl_compat_43_okillpg(long long pgid,
-                                                    long long signum);
-void __sanitizer_syscall_post_impl_compat_43_okillpg(long long res,
-                                                     long long pgid,
-                                                     long long signum);
-void __sanitizer_syscall_pre_impl_setsid(void);
-void __sanitizer_syscall_post_impl_setsid(long long res);
-void __sanitizer_syscall_pre_impl_compat_50_quotactl(long long path,
-                                                     long long cmd,
-                                                     long long uid,
-                                                     long long arg);
-void __sanitizer_syscall_post_impl_compat_50_quotactl(
-    long long res, long long path, long long cmd, long long uid, long long arg);
-void __sanitizer_syscall_pre_impl_compat_43_oquota(void);
-void __sanitizer_syscall_post_impl_compat_43_oquota(long long res);
-void __sanitizer_syscall_pre_impl_compat_43_ogetsockname(long long fdec,
-                                                         long long asa,
-                                                         long long alen);
-void __sanitizer_syscall_post_impl_compat_43_ogetsockname(long long res,
-                                                          long long fdec,
-                                                          long long asa,
-                                                          long long alen);
-/* syscall 151 has been skipped */
-/* syscall 152 has been skipped */
-/* syscall 153 has been skipped */
-/* syscall 154 has been skipped */
-void __sanitizer_syscall_pre_impl_nfssvc(long long flag, long long argp);
-void __sanitizer_syscall_post_impl_nfssvc(long long res, long long flag,
-                                          long long argp);
-void __sanitizer_syscall_pre_impl_compat_43_ogetdirentries(long long fd,
-                                                           long long buf,
-                                                           long long count,
-                                                           long long basep);
-void __sanitizer_syscall_post_impl_compat_43_ogetdirentries(long long res,
-                                                            long long fd,
-                                                            long long buf,
-                                                            long long count,
-                                                            long long basep);
-void __sanitizer_syscall_pre_impl_compat_20_statfs(long long path,
-                                                   long long buf);
-void __sanitizer_syscall_post_impl_compat_20_statfs(long long res,
-                                                    long long path,
-                                                    long long buf);
-void __sanitizer_syscall_pre_impl_compat_20_fstatfs(long long fd,
-                                                    long long buf);
-void __sanitizer_syscall_post_impl_compat_20_fstatfs(long long res,
-                                                     long long fd,
-                                                     long long buf);
-/* syscall 159 has been skipped */
-/* syscall 160 has been skipped */
-void __sanitizer_syscall_pre_impl_compat_30_getfh(long long fname,
-                                                  long long fhp);
-void __sanitizer_syscall_post_impl_compat_30_getfh(long long res,
-                                                   long long fname,
-                                                   long long fhp);
-void __sanitizer_syscall_pre_impl_compat_09_ogetdomainname(long long domainname,
-                                                           long long len);
-void __sanitizer_syscall_post_impl_compat_09_ogetdomainname(
-    long long res, long long domainname, long long len);
-void __sanitizer_syscall_pre_impl_compat_09_osetdomainname(long long domainname,
-                                                           long long len);
-void __sanitizer_syscall_post_impl_compat_09_osetdomainname(
-    long long res, long long domainname, long long len);
-void __sanitizer_syscall_pre_impl_compat_09_ouname(long long name);
-void __sanitizer_syscall_post_impl_compat_09_ouname(long long res,
-                                                    long long name);
-void __sanitizer_syscall_pre_impl_sysarch(long long op, long long parms);
-void __sanitizer_syscall_post_impl_sysarch(long long res, long long op,
-                                           long long parms);
-/* syscall 166 has been skipped */
-/* syscall 167 has been skipped */
-/* syscall 168 has been skipped */
-#if !defined(_LP64)
-void __sanitizer_syscall_pre_impl_compat_10_osemsys(long long which,
-                                                    long long a2, long long a3,
-                                                    long long a4, long long a5);
-void __sanitizer_syscall_post_impl_compat_10_osemsys(long long res,
-                                                     long long which,
-                                                     long long a2, long long a3,
-                                                     long long a4,
-                                                     long long a5);
-#else
-/* syscall 169 has been skipped */
-#endif
-#if !defined(_LP64)
-void __sanitizer_syscall_pre_impl_compat_10_omsgsys(long long which,
-                                                    long long a2, long long a3,
-                                                    long long a4, long long a5,
-                                                    long long a6);
-void __sanitizer_syscall_post_impl_compat_10_omsgsys(long long res,
-                                                     long long which,
-                                                     long long a2, long long a3,
-                                                     long long a4, long long a5,
-                                                     long long a6);
-#else
-/* syscall 170 has been skipped */
-#endif
-#if !defined(_LP64)
-void __sanitizer_syscall_pre_impl_compat_10_oshmsys(long long which,
-                                                    long long a2, long long a3,
-                                                    long long a4);
-void __sanitizer_syscall_post_impl_compat_10_oshmsys(long long res,
-                                                     long long which,
-                                                     long long a2, long long a3,
-                                                     long long a4);
-#else
-/* syscall 171 has been skipped */
-#endif
-/* syscall 172 has been skipped */
-void __sanitizer_syscall_pre_impl_pread(long long fd, long long buf,
-                                        long long nbyte, long long PAD,
-                                        long long offset);
-void __sanitizer_syscall_post_impl_pread(long long res, long long fd,
-                                         long long buf, long long nbyte,
-                                         long long PAD, long long offset);
-void __sanitizer_syscall_pre_impl_pwrite(long long fd, long long buf,
-                                         long long nbyte, long long PAD,
-                                         long long offset);
-void __sanitizer_syscall_post_impl_pwrite(long long res, long long fd,
-                                          long long buf, long long nbyte,
-                                          long long PAD, long long offset);
-void __sanitizer_syscall_pre_impl_compat_30_ntp_gettime(long long ntvp);
-void __sanitizer_syscall_post_impl_compat_30_ntp_gettime(long long res,
-                                                         long long ntvp);
-#if defined(NTP) || !defined(_KERNEL_OPT)
-void __sanitizer_syscall_pre_impl_ntp_adjtime(long long tp);
-void __sanitizer_syscall_post_impl_ntp_adjtime(long long res, long long tp);
-#else
-/* syscall 176 has been skipped */
-#endif
-/* syscall 177 has been skipped */
-/* syscall 178 has been skipped */
-/* syscall 179 has been skipped */
-/* syscall 180 has been skipped */
-void __sanitizer_syscall_pre_impl_setgid(long long gid);
-void __sanitizer_syscall_post_impl_setgid(long long res, long long gid);
-void __sanitizer_syscall_pre_impl_setegid(long long egid);
-void __sanitizer_syscall_post_impl_setegid(long long res, long long egid);
-void __sanitizer_syscall_pre_impl_seteuid(long long euid);
-void __sanitizer_syscall_post_impl_seteuid(long long res, long long euid);
-void __sanitizer_syscall_pre_impl_lfs_bmapv(long long fsidp, long long blkiov,
-                                            long long blkcnt);
-void __sanitizer_syscall_post_impl_lfs_bmapv(long long res, long long fsidp,
-                                             long long blkiov,
-                                             long long blkcnt);
-void __sanitizer_syscall_pre_impl_lfs_markv(long long fsidp, long long blkiov,
-                                            long long blkcnt);
-void __sanitizer_syscall_post_impl_lfs_markv(long long res, long long fsidp,
-                                             long long blkiov,
-                                             long long blkcnt);
-void __sanitizer_syscall_pre_impl_lfs_segclean(long long fsidp,
-                                               long long segment);
-void __sanitizer_syscall_post_impl_lfs_segclean(long long res, long long fsidp,
-                                                long long segment);
-void __sanitizer_syscall_pre_impl_compat_50_lfs_segwait(long long fsidp,
-                                                        long long tv);
-void __sanitizer_syscall_post_impl_compat_50_lfs_segwait(long long res,
-                                                         long long fsidp,
-                                                         long long tv);
-void __sanitizer_syscall_pre_impl_compat_12_stat12(long long path,
-                                                   long long ub);
-void __sanitizer_syscall_post_impl_compat_12_stat12(long long res,
-                                                    long long path,
-                                                    long long ub);
-void __sanitizer_syscall_pre_impl_compat_12_fstat12(long long fd, long long sb);
-void __sanitizer_syscall_post_impl_compat_12_fstat12(long long res,
-                                                     long long fd,
-                                                     long long sb);
-void __sanitizer_syscall_pre_impl_compat_12_lstat12(long long path,
-                                                    long long ub);
-void __sanitizer_syscall_post_impl_compat_12_lstat12(long long res,
-                                                     long long path,
-                                                     long long ub);
-void __sanitizer_syscall_pre_impl_pathconf(long long path, long long name);
-void __sanitizer_syscall_post_impl_pathconf(long long res, long long path,
-                                            long long name);
-void __sanitizer_syscall_pre_impl_fpathconf(long long fd, long long name);
-void __sanitizer_syscall_post_impl_fpathconf(long long res, long long fd,
-                                             long long name);
-void __sanitizer_syscall_pre_impl_getsockopt2(long long s, long long level,
-                                              long long name, long long val,
-                                              long long avalsize);
-void __sanitizer_syscall_post_impl_getsockopt2(long long res, long long s,
-                                               long long level, long long name,
-                                               long long val,
-                                               long long avalsize);
-void __sanitizer_syscall_pre_impl_getrlimit(long long which, long long rlp);
-void __sanitizer_syscall_post_impl_getrlimit(long long res, long long which,
-                                             long long rlp);
-void __sanitizer_syscall_pre_impl_setrlimit(long long which, long long rlp);
-void __sanitizer_syscall_post_impl_setrlimit(long long res, long long which,
-                                             long long rlp);
-void __sanitizer_syscall_pre_impl_compat_12_getdirentries(long long fd,
-                                                          long long buf,
-                                                          long long count,
-                                                          long long basep);
-void __sanitizer_syscall_post_impl_compat_12_getdirentries(long long res,
-                                                           long long fd,
-                                                           long long buf,
-                                                           long long count,
-                                                           long long basep);
-void __sanitizer_syscall_pre_impl_mmap(long long addr, long long len,
-                                       long long prot, long long flags,
-                                       long long fd, long long PAD,
-                                       long long pos);
-void __sanitizer_syscall_post_impl_mmap(long long res, long long addr,
-                                        long long len, long long prot,
-                                        long long flags, long long fd,
-                                        long long PAD, long long pos);
-void __sanitizer_syscall_pre_impl___syscall(long long code, long long arg0,
-                                            long long arg1, long long arg2,
-                                            long long arg3, long long arg4,
-                                            long long arg5, long long arg6,
-                                            long long arg7);
-void __sanitizer_syscall_post_impl___syscall(long long res, long long code,
-                                             long long arg0, long long arg1,
-                                             long long arg2, long long arg3,
-                                             long long arg4, long long arg5,
-                                             long long arg6, long long arg7);
-void __sanitizer_syscall_pre_impl_lseek(long long fd, long long PAD,
-                                        long long offset, long long whence);
-void __sanitizer_syscall_post_impl_lseek(long long res, long long fd,
-                                         long long PAD, long long offset,
-                                         long long whence);
-void __sanitizer_syscall_pre_impl_truncate(long long path, long long PAD,
-                                           long long length);
-void __sanitizer_syscall_post_impl_truncate(long long res, long long path,
-                                            long long PAD, long long length);
-void __sanitizer_syscall_pre_impl_ftruncate(long long fd, long long PAD,
-                                            long long length);
-void __sanitizer_syscall_post_impl_ftruncate(long long res, long long fd,
-                                             long long PAD, long long length);
-void __sanitizer_syscall_pre_impl___sysctl(long long name, long long namelen,
-                                           long long oldv, long long oldlenp,
-                                           long long newv, long long newlen);
-void __sanitizer_syscall_post_impl___sysctl(long long res, long long name,
-                                            long long namelen, long long oldv,
-                                            long long oldlenp, long long newv,
-                                            long long newlen);
-void __sanitizer_syscall_pre_impl_mlock(long long addr, long long len);
-void __sanitizer_syscall_post_impl_mlock(long long res, long long addr,
-                                         long long len);
-void __sanitizer_syscall_pre_impl_munlock(long long addr, long long len);
-void __sanitizer_syscall_post_impl_munlock(long long res, long long addr,
-                                           long long len);
-void __sanitizer_syscall_pre_impl_undelete(long long path);
-void __sanitizer_syscall_post_impl_undelete(long long res, long long path);
-void __sanitizer_syscall_pre_impl_compat_50_futimes(long long fd,
-                                                    long long tptr);
-void __sanitizer_syscall_post_impl_compat_50_futimes(long long res,
-                                                     long long fd,
-                                                     long long tptr);
-void __sanitizer_syscall_pre_impl_getpgid(long long pid);
-void __sanitizer_syscall_post_impl_getpgid(long long res, long long pid);
-void __sanitizer_syscall_pre_impl_reboot(long long opt, long long bootstr);
-void __sanitizer_syscall_post_impl_reboot(long long res, long long opt,
-                                          long long bootstr);
-void __sanitizer_syscall_pre_impl_poll(long long fds, long long nfds,
-                                       long long timeout);
-void __sanitizer_syscall_post_impl_poll(long long res, long long fds,
-                                        long long nfds, long long timeout);
-void __sanitizer_syscall_pre_impl_afssys(long long id, long long a1,
-                                         long long a2, long long a3,
-                                         long long a4, long long a5,
-                                         long long a6);
-void __sanitizer_syscall_post_impl_afssys(long long res, long long id,
-                                          long long a1, long long a2,
-                                          long long a3, long long a4,
-                                          long long a5, long long a6);
-/* syscall 211 has been skipped */
-/* syscall 212 has been skipped */
-/* syscall 213 has been skipped */
-/* syscall 214 has been skipped */
-/* syscall 215 has been skipped */
-/* syscall 216 has been skipped */
-/* syscall 217 has been skipped */
-/* syscall 218 has been skipped */
-/* syscall 219 has been skipped */
-void __sanitizer_syscall_pre_impl_compat_14___semctl(long long semid,
-                                                     long long semnum,
-                                                     long long cmd,
-                                                     long long arg);
-void __sanitizer_syscall_post_impl_compat_14___semctl(long long res,
-                                                      long long semid,
-                                                      long long semnum,
-                                                      long long cmd,
-                                                      long long arg);
-void __sanitizer_syscall_pre_impl_semget(long long key, long long nsems,
-                                         long long semflg);
-void __sanitizer_syscall_post_impl_semget(long long res, long long key,
-                                          long long nsems, long long semflg);
-void __sanitizer_syscall_pre_impl_semop(long long semid, long long sops,
-                                        long long nsops);
-void __sanitizer_syscall_post_impl_semop(long long res, long long semid,
-                                         long long sops, long long nsops);
-void __sanitizer_syscall_pre_impl_semconfig(long long flag);
-void __sanitizer_syscall_post_impl_semconfig(long long res, long long flag);
-void __sanitizer_syscall_pre_impl_compat_14_msgctl(long long msqid,
-                                                   long long cmd,
-                                                   long long buf);
-void __sanitizer_syscall_post_impl_compat_14_msgctl(long long res,
-                                                    long long msqid,
-                                                    long long cmd,
-                                                    long long buf);
-void __sanitizer_syscall_pre_impl_msgget(long long key, long long msgflg);
-void __sanitizer_syscall_post_impl_msgget(long long res, long long key,
-                                          long long msgflg);
-void __sanitizer_syscall_pre_impl_msgsnd(long long msqid, long long msgp,
-                                         long long msgsz, long long msgflg);
-void __sanitizer_syscall_post_impl_msgsnd(long long res, long long msqid,
-                                          long long msgp, long long msgsz,
-                                          long long msgflg);
-void __sanitizer_syscall_pre_impl_msgrcv(long long msqid, long long msgp,
-                                         long long msgsz, long long msgtyp,
-                                         long long msgflg);
-void __sanitizer_syscall_post_impl_msgrcv(long long res, long long msqid,
-                                          long long msgp, long long msgsz,
-                                          long long msgtyp, long long msgflg);
-void __sanitizer_syscall_pre_impl_shmat(long long shmid, long long shmaddr,
-                                        long long shmflg);
-void __sanitizer_syscall_post_impl_shmat(long long res, long long shmid,
-                                         long long shmaddr, long long shmflg);
-void __sanitizer_syscall_pre_impl_compat_14_shmctl(long long shmid,
-                                                   long long cmd,
-                                                   long long buf);
-void __sanitizer_syscall_post_impl_compat_14_shmctl(long long res,
-                                                    long long shmid,
-                                                    long long cmd,
-                                                    long long buf);
-void __sanitizer_syscall_pre_impl_shmdt(long long shmaddr);
-void __sanitizer_syscall_post_impl_shmdt(long long res, long long shmaddr);
-void __sanitizer_syscall_pre_impl_shmget(long long key, long long size,
-                                         long long shmflg);
-void __sanitizer_syscall_post_impl_shmget(long long res, long long key,
-                                          long long size, long long shmflg);
-void __sanitizer_syscall_pre_impl_compat_50_clock_gettime(long long clock_id,
-                                                          long long tp);
-void __sanitizer_syscall_post_impl_compat_50_clock_gettime(long long res,
-                                                           long long clock_id,
-                                                           long long tp);
-void __sanitizer_syscall_pre_impl_compat_50_clock_settime(long long clock_id,
-                                                          long long tp);
-void __sanitizer_syscall_post_impl_compat_50_clock_settime(long long res,
-                                                           long long clock_id,
-                                                           long long tp);
-void __sanitizer_syscall_pre_impl_compat_50_clock_getres(long long clock_id,
-                                                         long long tp);
-void __sanitizer_syscall_post_impl_compat_50_clock_getres(long long res,
-                                                          long long clock_id,
-                                                          long long tp);
-void __sanitizer_syscall_pre_impl_timer_create(long long clock_id,
-                                               long long evp,
-                                               long long timerid);
-void __sanitizer_syscall_post_impl_timer_create(long long res,
-                                                long long clock_id,
-                                                long long evp,
-                                                long long timerid);
-void __sanitizer_syscall_pre_impl_timer_delete(long long timerid);
-void __sanitizer_syscall_post_impl_timer_delete(long long res,
-                                                long long timerid);
-void __sanitizer_syscall_pre_impl_compat_50_timer_settime(long long timerid,
-                                                          long long flags,
-                                                          long long value,
-                                                          long long ovalue);
-void __sanitizer_syscall_post_impl_compat_50_timer_settime(long long res,
-                                                           long long timerid,
-                                                           long long flags,
-                                                           long long value,
-                                                           long long ovalue);
-void __sanitizer_syscall_pre_impl_compat_50_timer_gettime(long long timerid,
-                                                          long long value);
-void __sanitizer_syscall_post_impl_compat_50_timer_gettime(long long res,
-                                                           long long timerid,
-                                                           long long value);
-void __sanitizer_syscall_pre_impl_timer_getoverrun(long long timerid);
-void __sanitizer_syscall_post_impl_timer_getoverrun(long long res,
-                                                    long long timerid);
-void __sanitizer_syscall_pre_impl_compat_50_nanosleep(long long rqtp,
-                                                      long long rmtp);
-void __sanitizer_syscall_post_impl_compat_50_nanosleep(long long res,
-                                                       long long rqtp,
-                                                       long long rmtp);
-void __sanitizer_syscall_pre_impl_fdatasync(long long fd);
-void __sanitizer_syscall_post_impl_fdatasync(long long res, long long fd);
-void __sanitizer_syscall_pre_impl_mlockall(long long flags);
-void __sanitizer_syscall_post_impl_mlockall(long long res, long long flags);
-void __sanitizer_syscall_pre_impl_munlockall(void);
-void __sanitizer_syscall_post_impl_munlockall(long long res);
-void __sanitizer_syscall_pre_impl_compat_50___sigtimedwait(long long set,
-                                                           long long info,
-                                                           long long timeout);
-void __sanitizer_syscall_post_impl_compat_50___sigtimedwait(long long res,
-                                                            long long set,
-                                                            long long info,
-                                                            long long timeout);
-void __sanitizer_syscall_pre_impl_sigqueueinfo(long long pid, long long info);
-void __sanitizer_syscall_post_impl_sigqueueinfo(long long res, long long pid,
-                                                long long info);
-void __sanitizer_syscall_pre_impl_modctl(long long cmd, long long arg);
-void __sanitizer_syscall_post_impl_modctl(long long res, long long cmd,
-                                          long long arg);
-void __sanitizer_syscall_pre_impl__ksem_init(long long value, long long idp);
-void __sanitizer_syscall_post_impl__ksem_init(long long res, long long value,
-                                              long long idp);
-void __sanitizer_syscall_pre_impl__ksem_open(long long name, long long oflag,
-                                             long long mode, long long value,
-                                             long long idp);
-void __sanitizer_syscall_post_impl__ksem_open(long long res, long long name,
-                                              long long oflag, long long mode,
-                                              long long value, long long idp);
-void __sanitizer_syscall_pre_impl__ksem_unlink(long long name);
-void __sanitizer_syscall_post_impl__ksem_unlink(long long res, long long name);
-void __sanitizer_syscall_pre_impl__ksem_close(long long id);
-void __sanitizer_syscall_post_impl__ksem_close(long long res, long long id);
-void __sanitizer_syscall_pre_impl__ksem_post(long long id);
-void __sanitizer_syscall_post_impl__ksem_post(long long res, long long id);
-void __sanitizer_syscall_pre_impl__ksem_wait(long long id);
-void __sanitizer_syscall_post_impl__ksem_wait(long long res, long long id);
-void __sanitizer_syscall_pre_impl__ksem_trywait(long long id);
-void __sanitizer_syscall_post_impl__ksem_trywait(long long res, long long id);
-void __sanitizer_syscall_pre_impl__ksem_getvalue(long long id, long long value);
-void __sanitizer_syscall_post_impl__ksem_getvalue(long long res, long long id,
-                                                  long long value);
-void __sanitizer_syscall_pre_impl__ksem_destroy(long long id);
-void __sanitizer_syscall_post_impl__ksem_destroy(long long res, long long id);
-void __sanitizer_syscall_pre_impl__ksem_timedwait(long long id,
-                                                  long long abstime);
-void __sanitizer_syscall_post_impl__ksem_timedwait(long long res, long long id,
-                                                   long long abstime);
-void __sanitizer_syscall_pre_impl_mq_open(long long name, long long oflag,
-                                          long long mode, long long attr);
-void __sanitizer_syscall_post_impl_mq_open(long long res, long long name,
-                                           long long oflag, long long mode,
-                                           long long attr);
-void __sanitizer_syscall_pre_impl_mq_close(long long mqdes);
-void __sanitizer_syscall_post_impl_mq_close(long long res, long long mqdes);
-void __sanitizer_syscall_pre_impl_mq_unlink(long long name);
-void __sanitizer_syscall_post_impl_mq_unlink(long long res, long long name);
-void __sanitizer_syscall_pre_impl_mq_getattr(long long mqdes, long long mqstat);
-void __sanitizer_syscall_post_impl_mq_getattr(long long res, long long mqdes,
-                                              long long mqstat);
-void __sanitizer_syscall_pre_impl_mq_setattr(long long mqdes, long long mqstat,
-                                             long long omqstat);
-void __sanitizer_syscall_post_impl_mq_setattr(long long res, long long mqdes,
-                                              long long mqstat,
-                                              long long omqstat);
-void __sanitizer_syscall_pre_impl_mq_notify(long long mqdes,
-                                            long long notification);
-void __sanitizer_syscall_post_impl_mq_notify(long long res, long long mqdes,
-                                             long long notification);
-void __sanitizer_syscall_pre_impl_mq_send(long long mqdes, long long msg_ptr,
-                                          long long msg_len,
-                                          long long msg_prio);
-void __sanitizer_syscall_post_impl_mq_send(long long res, long long mqdes,
-                                           long long msg_ptr, long long msg_len,
-                                           long long msg_prio);
-void __sanitizer_syscall_pre_impl_mq_receive(long long mqdes, long long msg_ptr,
-                                             long long msg_len,
-                                             long long msg_prio);
-void __sanitizer_syscall_post_impl_mq_receive(long long res, long long mqdes,
-                                              long long msg_ptr,
-                                              long long msg_len,
-                                              long long msg_prio);
-void __sanitizer_syscall_pre_impl_compat_50_mq_timedsend(long long mqdes,
-                                                         long long msg_ptr,
-                                                         long long msg_len,
-                                                         long long msg_prio,
-                                                         long long abs_timeout);
-void __sanitizer_syscall_post_impl_compat_50_mq_timedsend(
-    long long res, long long mqdes, long long msg_ptr, long long msg_len,
-    long long msg_prio, long long abs_timeout);
-void __sanitizer_syscall_pre_impl_compat_50_mq_timedreceive(
-    long long mqdes, long long msg_ptr, long long msg_len, long long msg_prio,
-    long long abs_timeout);
-void __sanitizer_syscall_post_impl_compat_50_mq_timedreceive(
-    long long res, long long mqdes, long long msg_ptr, long long msg_len,
-    long long msg_prio, long long abs_timeout);
-/* syscall 267 has been skipped */
-/* syscall 268 has been skipped */
-/* syscall 269 has been skipped */
-void __sanitizer_syscall_pre_impl___posix_rename(long long from, long long to);
-void __sanitizer_syscall_post_impl___posix_rename(long long res, long long from,
-                                                  long long to);
-void __sanitizer_syscall_pre_impl_swapctl(long long cmd, long long arg,
-                                          long long misc);
-void __sanitizer_syscall_post_impl_swapctl(long long res, long long cmd,
-                                           long long arg, long long misc);
-void __sanitizer_syscall_pre_impl_compat_30_getdents(long long fd,
-                                                     long long buf,
-                                                     long long count);
-void __sanitizer_syscall_post_impl_compat_30_getdents(long long res,
-                                                      long long fd,
-                                                      long long buf,
-                                                      long long count);
-void __sanitizer_syscall_pre_impl_minherit(long long addr, long long len,
-                                           long long inherit);
-void __sanitizer_syscall_post_impl_minherit(long long res, long long addr,
-                                            long long len, long long inherit);
-void __sanitizer_syscall_pre_impl_lchmod(long long path, long long mode);
-void __sanitizer_syscall_post_impl_lchmod(long long res, long long path,
-                                          long long mode);
-void __sanitizer_syscall_pre_impl_lchown(long long path, long long uid,
-                                         long long gid);
-void __sanitizer_syscall_post_impl_lchown(long long res, long long path,
-                                          long long uid, long long gid);
-void __sanitizer_syscall_pre_impl_compat_50_lutimes(long long path,
-                                                    long long tptr);
-void __sanitizer_syscall_post_impl_compat_50_lutimes(long long res,
-                                                     long long path,
-                                                     long long tptr);
-void __sanitizer_syscall_pre_impl___msync13(long long addr, long long len,
-                                            long long flags);
-void __sanitizer_syscall_post_impl___msync13(long long res, long long addr,
-                                             long long len, long long flags);
-void __sanitizer_syscall_pre_impl_compat_30___stat13(long long path,
-                                                     long long ub);
-void __sanitizer_syscall_post_impl_compat_30___stat13(long long res,
-                                                      long long path,
-                                                      long long ub);
-void __sanitizer_syscall_pre_impl_compat_30___fstat13(long long fd,
-                                                      long long sb);
-void __sanitizer_syscall_post_impl_compat_30___fstat13(long long res,
-                                                       long long fd,
-                                                       long long sb);
-void __sanitizer_syscall_pre_impl_compat_30___lstat13(long long path,
-                                                      long long ub);
-void __sanitizer_syscall_post_impl_compat_30___lstat13(long long res,
-                                                       long long path,
-                                                       long long ub);
-void __sanitizer_syscall_pre_impl___sigaltstack14(long long nss, long long oss);
-void __sanitizer_syscall_post_impl___sigaltstack14(long long res, long long nss,
-                                                   long long oss);
-void __sanitizer_syscall_pre_impl___vfork14(void);
-void __sanitizer_syscall_post_impl___vfork14(long long res);
-void __sanitizer_syscall_pre_impl___posix_chown(long long path, long long uid,
-                                                long long gid);
-void __sanitizer_syscall_post_impl___posix_chown(long long res, long long path,
-                                                 long long uid, long long gid);
-void __sanitizer_syscall_pre_impl___posix_fchown(long long fd, long long uid,
-                                                 long long gid);
-void __sanitizer_syscall_post_impl___posix_fchown(long long res, long long fd,
-                                                  long long uid, long long gid);
-void __sanitizer_syscall_pre_impl___posix_lchown(long long path, long long uid,
-                                                 long long gid);
-void __sanitizer_syscall_post_impl___posix_lchown(long long res, long long path,
-                                                  long long uid, long long gid);
-void __sanitizer_syscall_pre_impl_getsid(long long pid);
-void __sanitizer_syscall_post_impl_getsid(long long res, long long pid);
-void __sanitizer_syscall_pre_impl___clone(long long flags, long long stack);
-void __sanitizer_syscall_post_impl___clone(long long res, long long flags,
-                                           long long stack);
-void __sanitizer_syscall_pre_impl_fktrace(long long fd, long long ops,
-                                          long long facs, long long pid);
-void __sanitizer_syscall_post_impl_fktrace(long long res, long long fd,
-                                           long long ops, long long facs,
-                                           long long pid);
-void __sanitizer_syscall_pre_impl_preadv(long long fd, long long iovp,
-                                         long long iovcnt, long long PAD,
-                                         long long offset);
-void __sanitizer_syscall_post_impl_preadv(long long res, long long fd,
-                                          long long iovp, long long iovcnt,
-                                          long long PAD, long long offset);
-void __sanitizer_syscall_pre_impl_pwritev(long long fd, long long iovp,
-                                          long long iovcnt, long long PAD,
-                                          long long offset);
-void __sanitizer_syscall_post_impl_pwritev(long long res, long long fd,
-                                           long long iovp, long long iovcnt,
-                                           long long PAD, long long offset);
-void __sanitizer_syscall_pre_impl_compat_16___sigaction14(long long signum,
-                                                          long long nsa,
-                                                          long long osa);
-void __sanitizer_syscall_post_impl_compat_16___sigaction14(long long res,
-                                                           long long signum,
-                                                           long long nsa,
-                                                           long long osa);
-void __sanitizer_syscall_pre_impl___sigpending14(long long set);
-void __sanitizer_syscall_post_impl___sigpending14(long long res, long long set);
-void __sanitizer_syscall_pre_impl___sigprocmask14(long long how, long long set,
-                                                  long long oset);
-void __sanitizer_syscall_post_impl___sigprocmask14(long long res, long long how,
-                                                   long long set,
-                                                   long long oset);
-void __sanitizer_syscall_pre_impl___sigsuspend14(long long set);
-void __sanitizer_syscall_post_impl___sigsuspend14(long long res, long long set);
-void __sanitizer_syscall_pre_impl_compat_16___sigreturn14(long long sigcntxp);
-void __sanitizer_syscall_post_impl_compat_16___sigreturn14(long long res,
-                                                           long long sigcntxp);
-void __sanitizer_syscall_pre_impl___getcwd(long long bufp, long long length);
-void __sanitizer_syscall_post_impl___getcwd(long long res, long long bufp,
-                                            long long length);
-void __sanitizer_syscall_pre_impl_fchroot(long long fd);
-void __sanitizer_syscall_post_impl_fchroot(long long res, long long fd);
-void __sanitizer_syscall_pre_impl_compat_30_fhopen(long long fhp,
-                                                   long long flags);
-void __sanitizer_syscall_post_impl_compat_30_fhopen(long long res,
-                                                    long long fhp,
-                                                    long long flags);
-void __sanitizer_syscall_pre_impl_compat_30_fhstat(long long fhp, long long sb);
-void __sanitizer_syscall_post_impl_compat_30_fhstat(long long res,
-                                                    long long fhp,
-                                                    long long sb);
-void __sanitizer_syscall_pre_impl_compat_20_fhstatfs(long long fhp,
-                                                     long long buf);
-void __sanitizer_syscall_post_impl_compat_20_fhstatfs(long long res,
-                                                      long long fhp,
-                                                      long long buf);
-void __sanitizer_syscall_pre_impl_compat_50_____semctl13(long long semid,
-                                                         long long semnum,
-                                                         long long cmd,
-                                                         long long arg);
-void __sanitizer_syscall_post_impl_compat_50_____semctl13(long long res,
-                                                          long long semid,
-                                                          long long semnum,
-                                                          long long cmd,
-                                                          long long arg);
-void __sanitizer_syscall_pre_impl_compat_50___msgctl13(long long msqid,
-                                                       long long cmd,
-                                                       long long buf);
-void __sanitizer_syscall_post_impl_compat_50___msgctl13(long long res,
-                                                        long long msqid,
-                                                        long long cmd,
-                                                        long long buf);
-void __sanitizer_syscall_pre_impl_compat_50___shmctl13(long long shmid,
-                                                       long long cmd,
-                                                       long long buf);
-void __sanitizer_syscall_post_impl_compat_50___shmctl13(long long res,
-                                                        long long shmid,
-                                                        long long cmd,
-                                                        long long buf);
-void __sanitizer_syscall_pre_impl_lchflags(long long path, long long flags);
-void __sanitizer_syscall_post_impl_lchflags(long long res, long long path,
-                                            long long flags);
-void __sanitizer_syscall_pre_impl_issetugid(void);
-void __sanitizer_syscall_post_impl_issetugid(long long res);
-void __sanitizer_syscall_pre_impl_utrace(long long label, long long addr,
-                                         long long len);
-void __sanitizer_syscall_post_impl_utrace(long long res, long long label,
-                                          long long addr, long long len);
-void __sanitizer_syscall_pre_impl_getcontext(long long ucp);
-void __sanitizer_syscall_post_impl_getcontext(long long res, long long ucp);
-void __sanitizer_syscall_pre_impl_setcontext(long long ucp);
-void __sanitizer_syscall_post_impl_setcontext(long long res, long long ucp);
-void __sanitizer_syscall_pre_impl__lwp_create(long long ucp, long long flags,
-                                              long long new_lwp);
-void __sanitizer_syscall_post_impl__lwp_create(long long res, long long ucp,
-                                               long long flags,
-                                               long long new_lwp);
-void __sanitizer_syscall_pre_impl__lwp_exit(void);
-void __sanitizer_syscall_post_impl__lwp_exit(long long res);
-void __sanitizer_syscall_pre_impl__lwp_self(void);
-void __sanitizer_syscall_post_impl__lwp_self(long long res);
-void __sanitizer_syscall_pre_impl__lwp_wait(long long wait_for,
-                                            long long departed);
-void __sanitizer_syscall_post_impl__lwp_wait(long long res, long long wait_for,
-                                             long long departed);
-void __sanitizer_syscall_pre_impl__lwp_suspend(long long target);
-void __sanitizer_syscall_post_impl__lwp_suspend(long long res,
-                                                long long target);
-void __sanitizer_syscall_pre_impl__lwp_continue(long long target);
-void __sanitizer_syscall_post_impl__lwp_continue(long long res,
-                                                 long long target);
-void __sanitizer_syscall_pre_impl__lwp_wakeup(long long target);
-void __sanitizer_syscall_post_impl__lwp_wakeup(long long res, long long target);
-void __sanitizer_syscall_pre_impl__lwp_getprivate(void);
-void __sanitizer_syscall_post_impl__lwp_getprivate(long long res);
-void __sanitizer_syscall_pre_impl__lwp_setprivate(long long ptr);
-void __sanitizer_syscall_post_impl__lwp_setprivate(long long res,
-                                                   long long ptr);
-void __sanitizer_syscall_pre_impl__lwp_kill(long long target, long long signo);
-void __sanitizer_syscall_post_impl__lwp_kill(long long res, long long target,
-                                             long long signo);
-void __sanitizer_syscall_pre_impl__lwp_detach(long long target);
-void __sanitizer_syscall_post_impl__lwp_detach(long long res, long long target);
-void __sanitizer_syscall_pre_impl_compat_50__lwp_park(long long ts,
-                                                      long long unpark,
-                                                      long long hint,
-                                                      long long unparkhint);
-void __sanitizer_syscall_post_impl_compat_50__lwp_park(long long res,
-                                                       long long ts,
-                                                       long long unpark,
-                                                       long long hint,
-                                                       long long unparkhint);
-void __sanitizer_syscall_pre_impl__lwp_unpark(long long target, long long hint);
-void __sanitizer_syscall_post_impl__lwp_unpark(long long res, long long target,
-                                               long long hint);
-void __sanitizer_syscall_pre_impl__lwp_unpark_all(long long targets,
-                                                  long long ntargets,
-                                                  long long hint);
-void __sanitizer_syscall_post_impl__lwp_unpark_all(long long res,
-                                                   long long targets,
-                                                   long long ntargets,
-                                                   long long hint);
-void __sanitizer_syscall_pre_impl__lwp_setname(long long target,
-                                               long long name);
-void __sanitizer_syscall_post_impl__lwp_setname(long long res, long long target,
-                                                long long name);
-void __sanitizer_syscall_pre_impl__lwp_getname(long long target, long long name,
-                                               long long len);
-void __sanitizer_syscall_post_impl__lwp_getname(long long res, long long target,
-                                                long long name, long long len);
-void __sanitizer_syscall_pre_impl__lwp_ctl(long long features,
-                                           long long address);
-void __sanitizer_syscall_post_impl__lwp_ctl(long long res, long long features,
-                                            long long address);
-/* syscall 326 has been skipped */
-/* syscall 327 has been skipped */
-/* syscall 328 has been skipped */
-/* syscall 329 has been skipped */
-void __sanitizer_syscall_pre_impl_compat_60_sa_register(
-    long long newv, long long oldv, long long flags,
-    long long stackinfo_offset);
-void __sanitizer_syscall_post_impl_compat_60_sa_register(
-    long long res, long long newv, long long oldv, long long flags,
-    long long stackinfo_offset);
-void __sanitizer_syscall_pre_impl_compat_60_sa_stacks(long long num,
-                                                      long long stacks);
-void __sanitizer_syscall_post_impl_compat_60_sa_stacks(long long res,
-                                                       long long num,
-                                                       long long stacks);
-void __sanitizer_syscall_pre_impl_compat_60_sa_enable(void);
-void __sanitizer_syscall_post_impl_compat_60_sa_enable(long long res);
-void __sanitizer_syscall_pre_impl_compat_60_sa_setconcurrency(
-    long long concurrency);
-void __sanitizer_syscall_post_impl_compat_60_sa_setconcurrency(
-    long long res, long long concurrency);
-void __sanitizer_syscall_pre_impl_compat_60_sa_yield(void);
-void __sanitizer_syscall_post_impl_compat_60_sa_yield(long long res);
-void __sanitizer_syscall_pre_impl_compat_60_sa_preempt(long long sa_id);
-void __sanitizer_syscall_post_impl_compat_60_sa_preempt(long long res,
-                                                        long long sa_id);
-/* syscall 336 has been skipped */
-/* syscall 337 has been skipped */
-/* syscall 338 has been skipped */
-/* syscall 339 has been skipped */
-void __sanitizer_syscall_pre_impl___sigaction_sigtramp(long long signum,
-                                                       long long nsa,
-                                                       long long osa,
-                                                       long long tramp,
-                                                       long long vers);
-void __sanitizer_syscall_post_impl___sigaction_sigtramp(
-    long long res, long long signum, long long nsa, long long osa,
-    long long tramp, long long vers);
-/* syscall 341 has been skipped */
-/* syscall 342 has been skipped */
-void __sanitizer_syscall_pre_impl_rasctl(long long addr, long long len,
-                                         long long op);
-void __sanitizer_syscall_post_impl_rasctl(long long res, long long addr,
-                                          long long len, long long op);
-void __sanitizer_syscall_pre_impl_kqueue(void);
-void __sanitizer_syscall_post_impl_kqueue(long long res);
-void __sanitizer_syscall_pre_impl_compat_50_kevent(
-    long long fd, long long changelist, long long nchanges, long long eventlist,
-    long long nevents, long long timeout);
-void __sanitizer_syscall_post_impl_compat_50_kevent(
-    long long res, long long fd, long long changelist, long long nchanges,
-    long long eventlist, long long nevents, long long timeout);
-void __sanitizer_syscall_pre_impl__sched_setparam(long long pid, long long lid,
-                                                  long long policy,
-                                                  long long params);
-void __sanitizer_syscall_post_impl__sched_setparam(long long res, long long pid,
-                                                   long long lid,
-                                                   long long policy,
-                                                   long long params);
-void __sanitizer_syscall_pre_impl__sched_getparam(long long pid, long long lid,
-                                                  long long policy,
-                                                  long long params);
-void __sanitizer_syscall_post_impl__sched_getparam(long long res, long long pid,
-                                                   long long lid,
-                                                   long long policy,
-                                                   long long params);
-void __sanitizer_syscall_pre_impl__sched_setaffinity(long long pid,
-                                                     long long lid,
-                                                     long long size,
-                                                     long long cpuset);
-void __sanitizer_syscall_post_impl__sched_setaffinity(long long res,
-                                                      long long pid,
-                                                      long long lid,
-                                                      long long size,
-                                                      long long cpuset);
-void __sanitizer_syscall_pre_impl__sched_getaffinity(long long pid,
-                                                     long long lid,
-                                                     long long size,
-                                                     long long cpuset);
-void __sanitizer_syscall_post_impl__sched_getaffinity(long long res,
-                                                      long long pid,
-                                                      long long lid,
-                                                      long long size,
-                                                      long long cpuset);
-void __sanitizer_syscall_pre_impl_sched_yield(void);
-void __sanitizer_syscall_post_impl_sched_yield(long long res);
-void __sanitizer_syscall_pre_impl__sched_protect(long long priority);
-void __sanitizer_syscall_post_impl__sched_protect(long long res,
-                                                  long long priority);
-/* syscall 352 has been skipped */
-/* syscall 353 has been skipped */
-void __sanitizer_syscall_pre_impl_fsync_range(long long fd, long long flags,
-                                              long long start,
-                                              long long length);
-void __sanitizer_syscall_post_impl_fsync_range(long long res, long long fd,
-                                               long long flags, long long start,
-                                               long long length);
-void __sanitizer_syscall_pre_impl_uuidgen(long long store, long long count);
-void __sanitizer_syscall_post_impl_uuidgen(long long res, long long store,
-                                           long long count);
-void __sanitizer_syscall_pre_impl_getvfsstat(long long buf, long long bufsize,
-                                             long long flags);
-void __sanitizer_syscall_post_impl_getvfsstat(long long res, long long buf,
-                                              long long bufsize,
-                                              long long flags);
-void __sanitizer_syscall_pre_impl_statvfs1(long long path, long long buf,
-                                           long long flags);
-void __sanitizer_syscall_post_impl_statvfs1(long long res, long long path,
-                                            long long buf, long long flags);
-void __sanitizer_syscall_pre_impl_fstatvfs1(long long fd, long long buf,
-                                            long long flags);
-void __sanitizer_syscall_post_impl_fstatvfs1(long long res, long long fd,
-                                             long long buf, long long flags);
-void __sanitizer_syscall_pre_impl_compat_30_fhstatvfs1(long long fhp,
-                                                       long long buf,
-                                                       long long flags);
-void __sanitizer_syscall_post_impl_compat_30_fhstatvfs1(long long res,
-                                                        long long fhp,
-                                                        long long buf,
-                                                        long long flags);
-void __sanitizer_syscall_pre_impl_extattrctl(long long path, long long cmd,
-                                             long long filename,
-                                             long long attrnamespace,
-                                             long long attrname);
-void __sanitizer_syscall_post_impl_extattrctl(long long res, long long path,
-                                              long long cmd, long long filename,
-                                              long long attrnamespace,
-                                              long long attrname);
-void __sanitizer_syscall_pre_impl_extattr_set_file(long long path,
-                                                   long long attrnamespace,
-                                                   long long attrname,
-                                                   long long data,
-                                                   long long nbytes);
-void __sanitizer_syscall_post_impl_extattr_set_file(
-    long long res, long long path, long long attrnamespace, long long attrname,
-    long long data, long long nbytes);
-void __sanitizer_syscall_pre_impl_extattr_get_file(long long path,
-                                                   long long attrnamespace,
-                                                   long long attrname,
-                                                   long long data,
-                                                   long long nbytes);
-void __sanitizer_syscall_post_impl_extattr_get_file(
-    long long res, long long path, long long attrnamespace, long long attrname,
-    long long data, long long nbytes);
-void __sanitizer_syscall_pre_impl_extattr_delete_file(long long path,
-                                                      long long attrnamespace,
-                                                      long long attrname);
-void __sanitizer_syscall_post_impl_extattr_delete_file(long long res,
-                                                       long long path,
-                                                       long long attrnamespace,
-                                                       long long attrname);
-void __sanitizer_syscall_pre_impl_extattr_set_fd(long long fd,
-                                                 long long attrnamespace,
-                                                 long long attrname,
-                                                 long long data,
-                                                 long long nbytes);
-void __sanitizer_syscall_post_impl_extattr_set_fd(long long res, long long fd,
-                                                  long long attrnamespace,
-                                                  long long attrname,
-                                                  long long data,
-                                                  long long nbytes);
-void __sanitizer_syscall_pre_impl_extattr_get_fd(long long fd,
-                                                 long long attrnamespace,
-                                                 long long attrname,
-                                                 long long data,
-                                                 long long nbytes);
-void __sanitizer_syscall_post_impl_extattr_get_fd(long long res, long long fd,
-                                                  long long attrnamespace,
-                                                  long long attrname,
-                                                  long long data,
-                                                  long long nbytes);
-void __sanitizer_syscall_pre_impl_extattr_delete_fd(long long fd,
-                                                    long long attrnamespace,
-                                                    long long attrname);
-void __sanitizer_syscall_post_impl_extattr_delete_fd(long long res,
-                                                     long long fd,
-                                                     long long attrnamespace,
-                                                     long long attrname);
-void __sanitizer_syscall_pre_impl_extattr_set_link(long long path,
-                                                   long long attrnamespace,
-                                                   long long attrname,
-                                                   long long data,
-                                                   long long nbytes);
-void __sanitizer_syscall_post_impl_extattr_set_link(
-    long long res, long long path, long long attrnamespace, long long attrname,
-    long long data, long long nbytes);
-void __sanitizer_syscall_pre_impl_extattr_get_link(long long path,
-                                                   long long attrnamespace,
-                                                   long long attrname,
-                                                   long long data,
-                                                   long long nbytes);
-void __sanitizer_syscall_post_impl_extattr_get_link(
-    long long res, long long path, long long attrnamespace, long long attrname,
-    long long data, long long nbytes);
-void __sanitizer_syscall_pre_impl_extattr_delete_link(long long path,
-                                                      long long attrnamespace,
-                                                      long long attrname);
-void __sanitizer_syscall_post_impl_extattr_delete_link(long long res,
-                                                       long long path,
-                                                       long long attrnamespace,
-                                                       long long attrname);
-void __sanitizer_syscall_pre_impl_extattr_list_fd(long long fd,
-                                                  long long attrnamespace,
-                                                  long long data,
-                                                  long long nbytes);
-void __sanitizer_syscall_post_impl_extattr_list_fd(long long res, long long fd,
-                                                   long long attrnamespace,
-                                                   long long data,
-                                                   long long nbytes);
-void __sanitizer_syscall_pre_impl_extattr_list_file(long long path,
-                                                    long long attrnamespace,
-                                                    long long data,
-                                                    long long nbytes);
-void __sanitizer_syscall_post_impl_extattr_list_file(long long res,
-                                                     long long path,
-                                                     long long attrnamespace,
-                                                     long long data,
-                                                     long long nbytes);
-void __sanitizer_syscall_pre_impl_extattr_list_link(long long path,
-                                                    long long attrnamespace,
-                                                    long long data,
-                                                    long long nbytes);
-void __sanitizer_syscall_post_impl_extattr_list_link(long long res,
-                                                     long long path,
-                                                     long long attrnamespace,
-                                                     long long data,
-                                                     long long nbytes);
-void __sanitizer_syscall_pre_impl_compat_50_pselect(long long nd, long long in,
-                                                    long long ou, long long ex,
-                                                    long long ts,
-                                                    long long mask);
-void __sanitizer_syscall_post_impl_compat_50_pselect(long long res,
-                                                     long long nd, long long in,
-                                                     long long ou, long long ex,
-                                                     long long ts,
-                                                     long long mask);
-void __sanitizer_syscall_pre_impl_compat_50_pollts(long long fds,
-                                                   long long nfds, long long ts,
-                                                   long long mask);
-void __sanitizer_syscall_post_impl_compat_50_pollts(
-    long long res, long long fds, long long nfds, long long ts, long long mask);
-void __sanitizer_syscall_pre_impl_setxattr(long long path, long long name,
-                                           long long value, long long size,
-                                           long long flags);
-void __sanitizer_syscall_post_impl_setxattr(long long res, long long path,
-                                            long long name, long long value,
-                                            long long size, long long flags);
-void __sanitizer_syscall_pre_impl_lsetxattr(long long path, long long name,
-                                            long long value, long long size,
-                                            long long flags);
-void __sanitizer_syscall_post_impl_lsetxattr(long long res, long long path,
-                                             long long name, long long value,
-                                             long long size, long long flags);
-void __sanitizer_syscall_pre_impl_fsetxattr(long long fd, long long name,
-                                            long long value, long long size,
-                                            long long flags);
-void __sanitizer_syscall_post_impl_fsetxattr(long long res, long long fd,
-                                             long long name, long long value,
-                                             long long size, long long flags);
-void __sanitizer_syscall_pre_impl_getxattr(long long path, long long name,
-                                           long long value, long long size);
-void __sanitizer_syscall_post_impl_getxattr(long long res, long long path,
-                                            long long name, long long value,
-                                            long long size);
-void __sanitizer_syscall_pre_impl_lgetxattr(long long path, long long name,
-                                            long long value, long long size);
-void __sanitizer_syscall_post_impl_lgetxattr(long long res, long long path,
-                                             long long name, long long value,
-                                             long long size);
-void __sanitizer_syscall_pre_impl_fgetxattr(long long fd, long long name,
-                                            long long value, long long size);
-void __sanitizer_syscall_post_impl_fgetxattr(long long res, long long fd,
-                                             long long name, long long value,
-                                             long long size);
-void __sanitizer_syscall_pre_impl_listxattr(long long path, long long list,
-                                            long long size);
-void __sanitizer_syscall_post_impl_listxattr(long long res, long long path,
-                                             long long list, long long size);
-void __sanitizer_syscall_pre_impl_llistxattr(long long path, long long list,
-                                             long long size);
-void __sanitizer_syscall_post_impl_llistxattr(long long res, long long path,
-                                              long long list, long long size);
-void __sanitizer_syscall_pre_impl_flistxattr(long long fd, long long list,
-                                             long long size);
-void __sanitizer_syscall_post_impl_flistxattr(long long res, long long fd,
-                                              long long list, long long size);
-void __sanitizer_syscall_pre_impl_removexattr(long long path, long long name);
-void __sanitizer_syscall_post_impl_removexattr(long long res, long long path,
-                                               long long name);
-void __sanitizer_syscall_pre_impl_lremovexattr(long long path, long long name);
-void __sanitizer_syscall_post_impl_lremovexattr(long long res, long long path,
-                                                long long name);
-void __sanitizer_syscall_pre_impl_fremovexattr(long long fd, long long name);
-void __sanitizer_syscall_post_impl_fremovexattr(long long res, long long fd,
-                                                long long name);
-void __sanitizer_syscall_pre_impl_compat_50___stat30(long long path,
-                                                     long long ub);
-void __sanitizer_syscall_post_impl_compat_50___stat30(long long res,
-                                                      long long path,
-                                                      long long ub);
-void __sanitizer_syscall_pre_impl_compat_50___fstat30(long long fd,
-                                                      long long sb);
-void __sanitizer_syscall_post_impl_compat_50___fstat30(long long res,
-                                                       long long fd,
-                                                       long long sb);
-void __sanitizer_syscall_pre_impl_compat_50___lstat30(long long path,
-                                                      long long ub);
-void __sanitizer_syscall_post_impl_compat_50___lstat30(long long res,
-                                                       long long path,
-                                                       long long ub);
-void __sanitizer_syscall_pre_impl___getdents30(long long fd, long long buf,
-                                               long long count);
-void __sanitizer_syscall_post_impl___getdents30(long long res, long long fd,
-                                                long long buf, long long count);
-void __sanitizer_syscall_pre_impl_posix_fadvise(long long);
-void __sanitizer_syscall_post_impl_posix_fadvise(long long res, long long);
-void __sanitizer_syscall_pre_impl_compat_30___fhstat30(long long fhp,
-                                                       long long sb);
-void __sanitizer_syscall_post_impl_compat_30___fhstat30(long long res,
-                                                        long long fhp,
-                                                        long long sb);
-void __sanitizer_syscall_pre_impl_compat_50___ntp_gettime30(long long ntvp);
-void __sanitizer_syscall_post_impl_compat_50___ntp_gettime30(long long res,
-                                                             long long ntvp);
-void __sanitizer_syscall_pre_impl___socket30(long long domain, long long type,
-                                             long long protocol);
-void __sanitizer_syscall_post_impl___socket30(long long res, long long domain,
-                                              long long type,
-                                              long long protocol);
-void __sanitizer_syscall_pre_impl___getfh30(long long fname, long long fhp,
-                                            long long fh_size);
-void __sanitizer_syscall_post_impl___getfh30(long long res, long long fname,
-                                             long long fhp, long long fh_size);
-void __sanitizer_syscall_pre_impl___fhopen40(long long fhp, long long fh_size,
-                                             long long flags);
-void __sanitizer_syscall_post_impl___fhopen40(long long res, long long fhp,
-                                              long long fh_size,
-                                              long long flags);
-void __sanitizer_syscall_pre_impl___fhstatvfs140(long long fhp,
-                                                 long long fh_size,
-                                                 long long buf,
-                                                 long long flags);
-void __sanitizer_syscall_post_impl___fhstatvfs140(long long res, long long fhp,
-                                                  long long fh_size,
-                                                  long long buf,
-                                                  long long flags);
-void __sanitizer_syscall_pre_impl_compat_50___fhstat40(long long fhp,
-                                                       long long fh_size,
-                                                       long long sb);
-void __sanitizer_syscall_post_impl_compat_50___fhstat40(long long res,
-                                                        long long fhp,
-                                                        long long fh_size,
-                                                        long long sb);
-void __sanitizer_syscall_pre_impl_aio_cancel(long long fildes,
-                                             long long aiocbp);
-void __sanitizer_syscall_post_impl_aio_cancel(long long res, long long fildes,
-                                              long long aiocbp);
-void __sanitizer_syscall_pre_impl_aio_error(long long aiocbp);
-void __sanitizer_syscall_post_impl_aio_error(long long res, long long aiocbp);
-void __sanitizer_syscall_pre_impl_aio_fsync(long long op, long long aiocbp);
-void __sanitizer_syscall_post_impl_aio_fsync(long long res, long long op,
-                                             long long aiocbp);
-void __sanitizer_syscall_pre_impl_aio_read(long long aiocbp);
-void __sanitizer_syscall_post_impl_aio_read(long long res, long long aiocbp);
-void __sanitizer_syscall_pre_impl_aio_return(long long aiocbp);
-void __sanitizer_syscall_post_impl_aio_return(long long res, long long aiocbp);
-void __sanitizer_syscall_pre_impl_compat_50_aio_suspend(long long list,
-                                                        long long nent,
-                                                        long long timeout);
-void __sanitizer_syscall_post_impl_compat_50_aio_suspend(long long res,
-                                                         long long list,
-                                                         long long nent,
-                                                         long long timeout);
-void __sanitizer_syscall_pre_impl_aio_write(long long aiocbp);
-void __sanitizer_syscall_post_impl_aio_write(long long res, long long aiocbp);
-void __sanitizer_syscall_pre_impl_lio_listio(long long mode, long long list,
-                                             long long nent, long long sig);
-void __sanitizer_syscall_post_impl_lio_listio(long long res, long long mode,
-                                              long long list, long long nent,
-                                              long long sig);
-/* syscall 407 has been skipped */
-/* syscall 408 has been skipped */
-/* syscall 409 has been skipped */
-void __sanitizer_syscall_pre_impl___mount50(long long type, long long path,
-                                            long long flags, long long data,
-                                            long long data_len);
-void __sanitizer_syscall_post_impl___mount50(long long res, long long type,
-                                             long long path, long long flags,
-                                             long long data,
-                                             long long data_len);
-void __sanitizer_syscall_pre_impl_mremap(long long old_address,
-                                         long long old_size,
-                                         long long new_address,
-                                         long long new_size, long long flags);
-void __sanitizer_syscall_post_impl_mremap(long long res, long long old_address,
-                                          long long old_size,
-                                          long long new_address,
-                                          long long new_size, long long flags);
-void __sanitizer_syscall_pre_impl_pset_create(long long psid);
-void __sanitizer_syscall_post_impl_pset_create(long long res, long long psid);
-void __sanitizer_syscall_pre_impl_pset_destroy(long long psid);
-void __sanitizer_syscall_post_impl_pset_destroy(long long res, long long psid);
-void __sanitizer_syscall_pre_impl_pset_assign(long long psid, long long cpuid,
-                                              long long opsid);
-void __sanitizer_syscall_post_impl_pset_assign(long long res, long long psid,
-                                               long long cpuid,
-                                               long long opsid);
-void __sanitizer_syscall_pre_impl__pset_bind(long long idtype,
-                                             long long first_id,
-                                             long long second_id,
-                                             long long psid, long long opsid);
-void __sanitizer_syscall_post_impl__pset_bind(long long res, long long idtype,
-                                              long long first_id,
-                                              long long second_id,
-                                              long long psid, long long opsid);
-void __sanitizer_syscall_pre_impl___posix_fadvise50(long long fd, long long PAD,
-                                                    long long offset,
-                                                    long long len,
-                                                    long long advice);
-void __sanitizer_syscall_post_impl___posix_fadvise50(
-    long long res, long long fd, long long PAD, long long offset, long long len,
-    long long advice);
-void __sanitizer_syscall_pre_impl___select50(long long nd, long long in,
-                                             long long ou, long long ex,
-                                             long long tv);
-void __sanitizer_syscall_post_impl___select50(long long res, long long nd,
-                                              long long in, long long ou,
-                                              long long ex, long long tv);
-void __sanitizer_syscall_pre_impl___gettimeofday50(long long tp, long long tzp);
-void __sanitizer_syscall_post_impl___gettimeofday50(long long res, long long tp,
-                                                    long long tzp);
-void __sanitizer_syscall_pre_impl___settimeofday50(long long tv, long long tzp);
-void __sanitizer_syscall_post_impl___settimeofday50(long long res, long long tv,
-                                                    long long tzp);
-void __sanitizer_syscall_pre_impl___utimes50(long long path, long long tptr);
-void __sanitizer_syscall_post_impl___utimes50(long long res, long long path,
-                                              long long tptr);
-void __sanitizer_syscall_pre_impl___adjtime50(long long delta,
-                                              long long olddelta);
-void __sanitizer_syscall_post_impl___adjtime50(long long res, long long delta,
-                                               long long olddelta);
-void __sanitizer_syscall_pre_impl___lfs_segwait50(long long fsidp,
-                                                  long long tv);
-void __sanitizer_syscall_post_impl___lfs_segwait50(long long res,
-                                                   long long fsidp,
-                                                   long long tv);
-void __sanitizer_syscall_pre_impl___futimes50(long long fd, long long tptr);
-void __sanitizer_syscall_post_impl___futimes50(long long res, long long fd,
-                                               long long tptr);
-void __sanitizer_syscall_pre_impl___lutimes50(long long path, long long tptr);
-void __sanitizer_syscall_post_impl___lutimes50(long long res, long long path,
-                                               long long tptr);
-void __sanitizer_syscall_pre_impl___setitimer50(long long which, long long itv,
-                                                long long oitv);
-void __sanitizer_syscall_post_impl___setitimer50(long long res, long long which,
-                                                 long long itv, long long oitv);
-void __sanitizer_syscall_pre_impl___getitimer50(long long which, long long itv);
-void __sanitizer_syscall_post_impl___getitimer50(long long res, long long which,
-                                                 long long itv);
-void __sanitizer_syscall_pre_impl___clock_gettime50(long long clock_id,
-                                                    long long tp);
-void __sanitizer_syscall_post_impl___clock_gettime50(long long res,
-                                                     long long clock_id,
-                                                     long long tp);
-void __sanitizer_syscall_pre_impl___clock_settime50(long long clock_id,
-                                                    long long tp);
-void __sanitizer_syscall_post_impl___clock_settime50(long long res,
-                                                     long long clock_id,
-                                                     long long tp);
-void __sanitizer_syscall_pre_impl___clock_getres50(long long clock_id,
-                                                   long long tp);
-void __sanitizer_syscall_post_impl___clock_getres50(long long res,
-                                                    long long clock_id,
-                                                    long long tp);
-void __sanitizer_syscall_pre_impl___nanosleep50(long long rqtp, long long rmtp);
-void __sanitizer_syscall_post_impl___nanosleep50(long long res, long long rqtp,
-                                                 long long rmtp);
-void __sanitizer_syscall_pre_impl_____sigtimedwait50(long long set,
-                                                     long long info,
-                                                     long long timeout);
-void __sanitizer_syscall_post_impl_____sigtimedwait50(long long res,
-                                                      long long set,
-                                                      long long info,
-                                                      long long timeout);
-void __sanitizer_syscall_pre_impl___mq_timedsend50(long long mqdes,
-                                                   long long msg_ptr,
-                                                   long long msg_len,
-                                                   long long msg_prio,
-                                                   long long abs_timeout);
-void __sanitizer_syscall_post_impl___mq_timedsend50(
-    long long res, long long mqdes, long long msg_ptr, long long msg_len,
-    long long msg_prio, long long abs_timeout);
-void __sanitizer_syscall_pre_impl___mq_timedreceive50(long long mqdes,
-                                                      long long msg_ptr,
-                                                      long long msg_len,
-                                                      long long msg_prio,
-                                                      long long abs_timeout);
-void __sanitizer_syscall_post_impl___mq_timedreceive50(
-    long long res, long long mqdes, long long msg_ptr, long long msg_len,
-    long long msg_prio, long long abs_timeout);
-void __sanitizer_syscall_pre_impl_compat_60__lwp_park(long long ts,
-                                                      long long unpark,
-                                                      long long hint,
-                                                      long long unparkhint);
-void __sanitizer_syscall_post_impl_compat_60__lwp_park(long long res,
-                                                       long long ts,
-                                                       long long unpark,
-                                                       long long hint,
-                                                       long long unparkhint);
-void __sanitizer_syscall_pre_impl___kevent50(long long fd, long long changelist,
-                                             long long nchanges,
-                                             long long eventlist,
-                                             long long nevents,
-                                             long long timeout);
-void __sanitizer_syscall_post_impl___kevent50(
-    long long res, long long fd, long long changelist, long long nchanges,
-    long long eventlist, long long nevents, long long timeout);
-void __sanitizer_syscall_pre_impl___pselect50(long long nd, long long in,
-                                              long long ou, long long ex,
-                                              long long ts, long long mask);
-void __sanitizer_syscall_post_impl___pselect50(long long res, long long nd,
-                                               long long in, long long ou,
-                                               long long ex, long long ts,
-                                               long long mask);
-void __sanitizer_syscall_pre_impl___pollts50(long long fds, long long nfds,
-                                             long long ts, long long mask);
-void __sanitizer_syscall_post_impl___pollts50(long long res, long long fds,
-                                              long long nfds, long long ts,
-                                              long long mask);
-void __sanitizer_syscall_pre_impl___aio_suspend50(long long list,
-                                                  long long nent,
-                                                  long long timeout);
-void __sanitizer_syscall_post_impl___aio_suspend50(long long res,
-                                                   long long list,
-                                                   long long nent,
-                                                   long long timeout);
-void __sanitizer_syscall_pre_impl___stat50(long long path, long long ub);
-void __sanitizer_syscall_post_impl___stat50(long long res, long long path,
-                                            long long ub);
-void __sanitizer_syscall_pre_impl___fstat50(long long fd, long long sb);
-void __sanitizer_syscall_post_impl___fstat50(long long res, long long fd,
-                                             long long sb);
-void __sanitizer_syscall_pre_impl___lstat50(long long path, long long ub);
-void __sanitizer_syscall_post_impl___lstat50(long long res, long long path,
-                                             long long ub);
-void __sanitizer_syscall_pre_impl_____semctl50(long long semid,
-                                               long long semnum, long long cmd,
-                                               long long arg);
-void __sanitizer_syscall_post_impl_____semctl50(long long res, long long semid,
-                                                long long semnum, long long cmd,
-                                                long long arg);
-void __sanitizer_syscall_pre_impl___shmctl50(long long shmid, long long cmd,
-                                             long long buf);
-void __sanitizer_syscall_post_impl___shmctl50(long long res, long long shmid,
-                                              long long cmd, long long buf);
-void __sanitizer_syscall_pre_impl___msgctl50(long long msqid, long long cmd,
-                                             long long buf);
-void __sanitizer_syscall_post_impl___msgctl50(long long res, long long msqid,
-                                              long long cmd, long long buf);
-void __sanitizer_syscall_pre_impl___getrusage50(long long who,
-                                                long long rusage);
-void __sanitizer_syscall_post_impl___getrusage50(long long res, long long who,
-                                                 long long rusage);
-void __sanitizer_syscall_pre_impl___timer_settime50(long long timerid,
-                                                    long long flags,
-                                                    long long value,
-                                                    long long ovalue);
-void __sanitizer_syscall_post_impl___timer_settime50(long long res,
-                                                     long long timerid,
-                                                     long long flags,
-                                                     long long value,
-                                                     long long ovalue);
-void __sanitizer_syscall_pre_impl___timer_gettime50(long long timerid,
-                                                    long long value);
-void __sanitizer_syscall_post_impl___timer_gettime50(long long res,
-                                                     long long timerid,
-                                                     long long value);
-#if defined(NTP) || !defined(_KERNEL_OPT)
-void __sanitizer_syscall_pre_impl___ntp_gettime50(long long ntvp);
-void __sanitizer_syscall_post_impl___ntp_gettime50(long long res,
-                                                   long long ntvp);
-#else
-/* syscall 448 has been skipped */
-#endif
-void __sanitizer_syscall_pre_impl___wait450(long long pid, long long status,
-                                            long long options,
-                                            long long rusage);
-void __sanitizer_syscall_post_impl___wait450(long long res, long long pid,
-                                             long long status,
-                                             long long options,
-                                             long long rusage);
-void __sanitizer_syscall_pre_impl___mknod50(long long path, long long mode,
-                                            long long dev);
-void __sanitizer_syscall_post_impl___mknod50(long long res, long long path,
-                                             long long mode, long long dev);
-void __sanitizer_syscall_pre_impl___fhstat50(long long fhp, long long fh_size,
-                                             long long sb);
-void __sanitizer_syscall_post_impl___fhstat50(long long res, long long fhp,
-                                              long long fh_size, long long sb);
-/* syscall 452 has been skipped */
-void __sanitizer_syscall_pre_impl_pipe2(long long fildes, long long flags);
-void __sanitizer_syscall_post_impl_pipe2(long long res, long long fildes,
-                                         long long flags);
-void __sanitizer_syscall_pre_impl_dup3(long long from, long long to,
-                                       long long flags);
-void __sanitizer_syscall_post_impl_dup3(long long res, long long from,
-                                        long long to, long long flags);
-void __sanitizer_syscall_pre_impl_kqueue1(long long flags);
-void __sanitizer_syscall_post_impl_kqueue1(long long res, long long flags);
-void __sanitizer_syscall_pre_impl_paccept(long long s, long long name,
-                                          long long anamelen, long long mask,
-                                          long long flags);
-void __sanitizer_syscall_post_impl_paccept(long long res, long long s,
-                                           long long name, long long anamelen,
-                                           long long mask, long long flags);
-void __sanitizer_syscall_pre_impl_linkat(long long fd1, long long name1,
-                                         long long fd2, long long name2,
-                                         long long flags);
-void __sanitizer_syscall_post_impl_linkat(long long res, long long fd1,
-                                          long long name1, long long fd2,
-                                          long long name2, long long flags);
-void __sanitizer_syscall_pre_impl_renameat(long long fromfd, long long from,
-                                           long long tofd, long long to);
-void __sanitizer_syscall_post_impl_renameat(long long res, long long fromfd,
-                                            long long from, long long tofd,
-                                            long long to);
-void __sanitizer_syscall_pre_impl_mkfifoat(long long fd, long long path,
-                                           long long mode);
-void __sanitizer_syscall_post_impl_mkfifoat(long long res, long long fd,
-                                            long long path, long long mode);
-void __sanitizer_syscall_pre_impl_mknodat(long long fd, long long path,
-                                          long long mode, long long PAD,
-                                          long long dev);
-void __sanitizer_syscall_post_impl_mknodat(long long res, long long fd,
-                                           long long path, long long mode,
-                                           long long PAD, long long dev);
-void __sanitizer_syscall_pre_impl_mkdirat(long long fd, long long path,
-                                          long long mode);
-void __sanitizer_syscall_post_impl_mkdirat(long long res, long long fd,
-                                           long long path, long long mode);
-void __sanitizer_syscall_pre_impl_faccessat(long long fd, long long path,
-                                            long long amode, long long flag);
-void __sanitizer_syscall_post_impl_faccessat(long long res, long long fd,
-                                             long long path, long long amode,
-                                             long long flag);
-void __sanitizer_syscall_pre_impl_fchmodat(long long fd, long long path,
-                                           long long mode, long long flag);
-void __sanitizer_syscall_post_impl_fchmodat(long long res, long long fd,
-                                            long long path, long long mode,
-                                            long long flag);
-void __sanitizer_syscall_pre_impl_fchownat(long long fd, long long path,
-                                           long long owner, long long group,
-                                           long long flag);
-void __sanitizer_syscall_post_impl_fchownat(long long res, long long fd,
-                                            long long path, long long owner,
-                                            long long group, long long flag);
-void __sanitizer_syscall_pre_impl_fexecve(long long fd, long long argp,
-                                          long long envp);
-void __sanitizer_syscall_post_impl_fexecve(long long res, long long fd,
-                                           long long argp, long long envp);
-void __sanitizer_syscall_pre_impl_fstatat(long long fd, long long path,
-                                          long long buf, long long flag);
-void __sanitizer_syscall_post_impl_fstatat(long long res, long long fd,
-                                           long long path, long long buf,
-                                           long long flag);
-void __sanitizer_syscall_pre_impl_utimensat(long long fd, long long path,
-                                            long long tptr, long long flag);
-void __sanitizer_syscall_post_impl_utimensat(long long res, long long fd,
-                                             long long path, long long tptr,
-                                             long long flag);
-void __sanitizer_syscall_pre_impl_openat(long long fd, long long path,
-                                         long long oflags, long long mode);
-void __sanitizer_syscall_post_impl_openat(long long res, long long fd,
-                                          long long path, long long oflags,
-                                          long long mode);
-void __sanitizer_syscall_pre_impl_readlinkat(long long fd, long long path,
-                                             long long buf, long long bufsize);
-void __sanitizer_syscall_post_impl_readlinkat(long long res, long long fd,
-                                              long long path, long long buf,
-                                              long long bufsize);
-void __sanitizer_syscall_pre_impl_symlinkat(long long path1, long long fd,
-                                            long long path2);
-void __sanitizer_syscall_post_impl_symlinkat(long long res, long long path1,
-                                             long long fd, long long path2);
-void __sanitizer_syscall_pre_impl_unlinkat(long long fd, long long path,
-                                           long long flag);
-void __sanitizer_syscall_post_impl_unlinkat(long long res, long long fd,
-                                            long long path, long long flag);
-void __sanitizer_syscall_pre_impl_futimens(long long fd, long long tptr);
-void __sanitizer_syscall_post_impl_futimens(long long res, long long fd,
-                                            long long tptr);
-void __sanitizer_syscall_pre_impl___quotactl(long long path, long long args);
-void __sanitizer_syscall_post_impl___quotactl(long long res, long long path,
-                                              long long args);
-void __sanitizer_syscall_pre_impl_posix_spawn(long long pid, long long path,
-                                              long long file_actions,
-                                              long long attrp, long long argv,
-                                              long long envp);
-void __sanitizer_syscall_post_impl_posix_spawn(long long res, long long pid,
-                                               long long path,
-                                               long long file_actions,
-                                               long long attrp, long long argv,
-                                               long long envp);
-void __sanitizer_syscall_pre_impl_recvmmsg(long long s, long long mmsg,
-                                           long long vlen, long long flags,
-                                           long long timeout);
-void __sanitizer_syscall_post_impl_recvmmsg(long long res, long long s,
-                                            long long mmsg, long long vlen,
-                                            long long flags, long long timeout);
-void __sanitizer_syscall_pre_impl_sendmmsg(long long s, long long mmsg,
-                                           long long vlen, long long flags);
-void __sanitizer_syscall_post_impl_sendmmsg(long long res, long long s,
-                                            long long mmsg, long long vlen,
-                                            long long flags);
-void __sanitizer_syscall_pre_impl_clock_nanosleep(long long clock_id,
-                                                  long long flags,
-                                                  long long rqtp,
-                                                  long long rmtp);
-void __sanitizer_syscall_post_impl_clock_nanosleep(long long res,
-                                                   long long clock_id,
-                                                   long long flags,
-                                                   long long rqtp,
-                                                   long long rmtp);
-void __sanitizer_syscall_pre_impl____lwp_park60(long long clock_id,
-                                                long long flags, long long ts,
-                                                long long unpark,
-                                                long long hint,
-                                                long long unparkhint);
-void __sanitizer_syscall_post_impl____lwp_park60(
-    long long res, long long clock_id, long long flags, long long ts,
-    long long unpark, long long hint, long long unparkhint);
-void __sanitizer_syscall_pre_impl_posix_fallocate(long long fd, long long PAD,
-                                                  long long pos, long long len);
-void __sanitizer_syscall_post_impl_posix_fallocate(long long res, long long fd,
-                                                   long long PAD, long long pos,
-                                                   long long len);
-void __sanitizer_syscall_pre_impl_fdiscard(long long fd, long long PAD,
-                                           long long pos, long long len);
-void __sanitizer_syscall_post_impl_fdiscard(long long res, long long fd,
-                                            long long PAD, long long pos,
-                                            long long len);
-void __sanitizer_syscall_pre_impl_wait6(long long idtype, long long id,
-                                        long long status, long long options,
-                                        long long wru, long long info);
-void __sanitizer_syscall_post_impl_wait6(long long res, long long idtype,
-                                         long long id, long long status,
-                                         long long options, long long wru,
-                                         long long info);
-void __sanitizer_syscall_pre_impl_clock_getcpuclockid2(long long idtype,
-                                                       long long id,
-                                                       long long clock_id);
-void __sanitizer_syscall_post_impl_clock_getcpuclockid2(long long res,
-                                                        long long idtype,
-                                                        long long id,
-                                                        long long clock_id);
-
-#ifdef __cplusplus
-} // extern "C"
-#endif
-
-// DO NOT EDIT! THIS FILE HAS BEEN GENERATED!
-
-#endif // SANITIZER_NETBSD_SYSCALL_HOOKS_H
diff --git a/linux-x86/lib64/clang/10.0.1/include/__clang_cuda_builtin_vars.h b/linux-x86/lib64/clang/10.0.5/include/__clang_cuda_builtin_vars.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/__clang_cuda_builtin_vars.h
rename to linux-x86/lib64/clang/10.0.5/include/__clang_cuda_builtin_vars.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/__clang_cuda_cmath.h b/linux-x86/lib64/clang/10.0.5/include/__clang_cuda_cmath.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/__clang_cuda_cmath.h
rename to linux-x86/lib64/clang/10.0.5/include/__clang_cuda_cmath.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/__clang_cuda_complex_builtins.h b/linux-x86/lib64/clang/10.0.5/include/__clang_cuda_complex_builtins.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/__clang_cuda_complex_builtins.h
rename to linux-x86/lib64/clang/10.0.5/include/__clang_cuda_complex_builtins.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/__clang_cuda_device_functions.h b/linux-x86/lib64/clang/10.0.5/include/__clang_cuda_device_functions.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/__clang_cuda_device_functions.h
rename to linux-x86/lib64/clang/10.0.5/include/__clang_cuda_device_functions.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/__clang_cuda_intrinsics.h b/linux-x86/lib64/clang/10.0.5/include/__clang_cuda_intrinsics.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/__clang_cuda_intrinsics.h
rename to linux-x86/lib64/clang/10.0.5/include/__clang_cuda_intrinsics.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/__clang_cuda_libdevice_declares.h b/linux-x86/lib64/clang/10.0.5/include/__clang_cuda_libdevice_declares.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/__clang_cuda_libdevice_declares.h
rename to linux-x86/lib64/clang/10.0.5/include/__clang_cuda_libdevice_declares.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/__clang_cuda_math_forward_declares.h b/linux-x86/lib64/clang/10.0.5/include/__clang_cuda_math_forward_declares.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/__clang_cuda_math_forward_declares.h
rename to linux-x86/lib64/clang/10.0.5/include/__clang_cuda_math_forward_declares.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/__clang_cuda_runtime_wrapper.h b/linux-x86/lib64/clang/10.0.5/include/__clang_cuda_runtime_wrapper.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/__clang_cuda_runtime_wrapper.h
rename to linux-x86/lib64/clang/10.0.5/include/__clang_cuda_runtime_wrapper.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/__stddef_max_align_t.h b/linux-x86/lib64/clang/10.0.5/include/__stddef_max_align_t.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/__stddef_max_align_t.h
rename to linux-x86/lib64/clang/10.0.5/include/__stddef_max_align_t.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/__wmmintrin_aes.h b/linux-x86/lib64/clang/10.0.5/include/__wmmintrin_aes.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/__wmmintrin_aes.h
rename to linux-x86/lib64/clang/10.0.5/include/__wmmintrin_aes.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/__wmmintrin_pclmul.h b/linux-x86/lib64/clang/10.0.5/include/__wmmintrin_pclmul.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/__wmmintrin_pclmul.h
rename to linux-x86/lib64/clang/10.0.5/include/__wmmintrin_pclmul.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/adxintrin.h b/linux-x86/lib64/clang/10.0.5/include/adxintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/adxintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/adxintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/altivec.h b/linux-x86/lib64/clang/10.0.5/include/altivec.h
similarity index 98%
copy from darwin-x86/lib64/clang/10.0.1/include/altivec.h
copy to linux-x86/lib64/clang/10.0.5/include/altivec.h
index 4008440..7e231a2 100644
--- a/darwin-x86/lib64/clang/10.0.1/include/altivec.h
+++ b/linux-x86/lib64/clang/10.0.5/include/altivec.h
@@ -2761,8 +2761,8 @@
   return (vector double)__builtin_vsx_lxvl(__a, (__b << 56));
 }
 
-static __inline__ vector double __ATTRS_o_ai vec_xl_len_r(unsigned char *__a,
-                                                          size_t __b) {
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_xl_len_r(unsigned char *__a, size_t __b) {
   vector unsigned char __res =
       (vector unsigned char)__builtin_vsx_lxvll(__a, (__b << 56));
 #ifdef __LITTLE_ENDIAN__
@@ -2876,9 +2876,10 @@
 #ifdef __VSX__
 #define vec_ctf(__a, __b)                                                      \
   _Generic((__a), vector int                                                   \
-           : (vector float)__builtin_altivec_vcfsx((__a), (__b)),              \
+           : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)),  \
              vector unsigned int                                               \
-           : (vector float)__builtin_altivec_vcfux((vector int)(__a), (__b)),  \
+           : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \
+                                                   (__b)),                     \
              vector unsigned long long                                         \
            : (__builtin_convertvector((vector unsigned long long)(__a),        \
                                       vector double) *                         \
@@ -2892,9 +2893,10 @@
 #else
 #define vec_ctf(__a, __b)                                                      \
   _Generic((__a), vector int                                                   \
-           : (vector float)__builtin_altivec_vcfsx((__a), (__b)),              \
+           : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)),  \
              vector unsigned int                                               \
-           : (vector float)__builtin_altivec_vcfux((vector int)(__a), (__b)))
+           : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \
+                                                   (__b)))
 #endif
 
 /* vec_vcfsx */
@@ -2910,10 +2912,11 @@
 #ifdef __VSX__
 #define vec_cts(__a, __b)                                                      \
   _Generic((__a), vector float                                                 \
-           : __builtin_altivec_vctsxs((__a), (__b)), vector double             \
+           : __builtin_altivec_vctsxs((vector float)(__a), (__b)),             \
+             vector double                                                     \
            : __extension__({                                                   \
              vector double __ret =                                             \
-                 (__a) *                                                       \
+                 (vector double)(__a) *                                        \
                  (vector double)(vector unsigned long long)((0x3ffULL + (__b)) \
                                                             << 52);            \
              __builtin_convertvector(__ret, vector signed long long);          \
@@ -2931,10 +2934,11 @@
 #ifdef __VSX__
 #define vec_ctu(__a, __b)                                                      \
   _Generic((__a), vector float                                                 \
-           : __builtin_altivec_vctuxs((__a), (__b)), vector double             \
+           : __builtin_altivec_vctuxs((vector float)(__a), (__b)),             \
+             vector double                                                     \
            : __extension__({                                                   \
              vector double __ret =                                             \
-                 (__a) *                                                       \
+                 (vector double)(__a) *                                        \
                  (vector double)(vector unsigned long long)((0x3ffULL + __b)   \
                                                             << 52);            \
              __builtin_convertvector(__ret, vector unsigned long long);        \
@@ -3286,9 +3290,7 @@
 
 /* vec_dss */
 
-static __inline__ void __attribute__((__always_inline__)) vec_dss(int __a) {
-  __builtin_altivec_dss(__a);
-}
+#define vec_dss __builtin_altivec_dss
 
 /* vec_dssall */
 
@@ -6301,19 +6303,20 @@
 #ifdef __VSX__
 static __inline__ vector double __ATTRS_o_ai vec_or(vector bool long long __a,
                                                     vector double __b) {
-  return (vector unsigned long long)__a | (vector unsigned long long)__b;
+  return (vector double)((vector unsigned long long)__a |
+                         (vector unsigned long long)__b);
 }
 
 static __inline__ vector double __ATTRS_o_ai vec_or(vector double __a,
                                                     vector bool long long __b) {
-  return (vector unsigned long long)__a | (vector unsigned long long)__b;
+  return (vector double)((vector unsigned long long)__a |
+                         (vector unsigned long long)__b);
 }
 
 static __inline__ vector double __ATTRS_o_ai vec_or(vector double __a,
                                                     vector double __b) {
-  vector unsigned long long __res =
-      (vector unsigned long long)__a | (vector unsigned long long)__b;
-  return (vector double)__res;
+  return (vector double)((vector unsigned long long)__a |
+                         (vector unsigned long long)__b);
 }
 
 static __inline__ vector signed long long __ATTRS_o_ai
@@ -14781,7 +14784,7 @@
 static __inline__ int __ATTRS_o_ai vec_all_ne(vector float __a,
                                               vector float __b) {
 #ifdef __VSX__
-  return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ, __a, __b);
+  return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ, __a, __b);
 #else
   return __builtin_altivec_vcmpeqfp_p(__CR6_EQ, __a, __b);
 #endif
@@ -16361,27 +16364,32 @@
 
 static inline __ATTRS_o_ai vector signed short vec_xl(signed long long __offset,
                                                       signed short *__ptr) {
-  return *(unaligned_vec_sshort *)(__ptr + __offset);
+  signed char *__addr = (signed char *)__ptr + __offset;
+  return *(unaligned_vec_sshort *)__addr;
 }
 
 static inline __ATTRS_o_ai vector unsigned short
 vec_xl(signed long long __offset, unsigned short *__ptr) {
-  return *(unaligned_vec_ushort *)(__ptr + __offset);
+  signed char *__addr = (signed char *)__ptr + __offset;
+  return *(unaligned_vec_ushort *)__addr;
 }
 
 static inline __ATTRS_o_ai vector signed int vec_xl(signed long long __offset,
                                                     signed int *__ptr) {
-  return *(unaligned_vec_sint *)(__ptr + __offset);
+  signed char *__addr = (signed char *)__ptr + __offset;
+  return *(unaligned_vec_sint *)__addr;
 }
 
 static inline __ATTRS_o_ai vector unsigned int vec_xl(signed long long __offset,
                                                       unsigned int *__ptr) {
-  return *(unaligned_vec_uint *)(__ptr + __offset);
+  signed char *__addr = (signed char *)__ptr + __offset;
+  return *(unaligned_vec_uint *)__addr;
 }
 
 static inline __ATTRS_o_ai vector float vec_xl(signed long long __offset,
                                                float *__ptr) {
-  return *(unaligned_vec_float *)(__ptr + __offset);
+  signed char *__addr = (signed char *)__ptr + __offset;
+  return *(unaligned_vec_float *)__addr;
 }
 
 #ifdef __VSX__
@@ -16391,17 +16399,20 @@
 
 static inline __ATTRS_o_ai vector signed long long
 vec_xl(signed long long __offset, signed long long *__ptr) {
-  return *(unaligned_vec_sll *)(__ptr + __offset);
+  signed char *__addr = (signed char *)__ptr + __offset;
+  return *(unaligned_vec_sll *)__addr;
 }
 
 static inline __ATTRS_o_ai vector unsigned long long
 vec_xl(signed long long __offset, unsigned long long *__ptr) {
-  return *(unaligned_vec_ull *)(__ptr + __offset);
+  signed char *__addr = (signed char *)__ptr + __offset;
+  return *(unaligned_vec_ull *)__addr;
 }
 
 static inline __ATTRS_o_ai vector double vec_xl(signed long long __offset,
                                                 double *__ptr) {
-  return *(unaligned_vec_double *)(__ptr + __offset);
+  signed char *__addr = (signed char *)__ptr + __offset;
+  return *(unaligned_vec_double *)__addr;
 }
 #endif
 
@@ -16411,12 +16422,14 @@
     __attribute__((aligned(1)));
 static inline __ATTRS_o_ai vector signed __int128
 vec_xl(signed long long __offset, signed __int128 *__ptr) {
-  return *(unaligned_vec_si128 *)(__ptr + __offset);
+  signed char *__addr = (signed char *)__ptr + __offset;
+  return *(unaligned_vec_si128 *)__addr;
 }
 
 static inline __ATTRS_o_ai vector unsigned __int128
 vec_xl(signed long long __offset, unsigned __int128 *__ptr) {
-  return *(unaligned_vec_ui128 *)(__ptr + __offset);
+  signed char *__addr = (signed char *)__ptr + __offset;
+  return *(unaligned_vec_ui128 *)__addr;
 }
 #endif
 
@@ -16425,27 +16438,27 @@
 #ifdef __LITTLE_ENDIAN__
 static __inline__ vector signed char __ATTRS_o_ai
 vec_xl_be(signed long long __offset, signed char *__ptr) {
-  vector signed char __vec = __builtin_vsx_lxvd2x_be(__offset, __ptr);
+  vector signed char __vec = (vector signed char)__builtin_vsx_lxvd2x_be(__offset, __ptr);
   return __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
                                  13, 12, 11, 10, 9, 8);
 }
 
 static __inline__ vector unsigned char __ATTRS_o_ai
 vec_xl_be(signed long long __offset, unsigned char *__ptr) {
-  vector unsigned char __vec = __builtin_vsx_lxvd2x_be(__offset, __ptr);
+  vector unsigned char __vec = (vector unsigned char)__builtin_vsx_lxvd2x_be(__offset, __ptr);
   return __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
                                  13, 12, 11, 10, 9, 8);
 }
 
 static __inline__ vector signed short  __ATTRS_o_ai
 vec_xl_be(signed long long __offset, signed short *__ptr) {
-  vector signed short __vec = __builtin_vsx_lxvd2x_be(__offset, __ptr);
+  vector signed short __vec = (vector signed short)__builtin_vsx_lxvd2x_be(__offset, __ptr);
   return __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
 }
 
 static __inline__ vector unsigned short __ATTRS_o_ai
 vec_xl_be(signed long long __offset, unsigned short *__ptr) {
-  vector unsigned short __vec = __builtin_vsx_lxvd2x_be(__offset, __ptr);
+  vector unsigned short __vec = (vector unsigned short)__builtin_vsx_lxvd2x_be(__offset, __ptr);
   return __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
 }
 
@@ -16513,50 +16526,58 @@
 static inline __ATTRS_o_ai void vec_xst(vector signed short __vec,
                                         signed long long __offset,
                                         signed short *__ptr) {
-  *(unaligned_vec_sshort *)(__ptr + __offset) = __vec;
+  signed char *__addr = (signed char *)__ptr + __offset;
+  *(unaligned_vec_sshort *)__addr = __vec;
 }
 
 static inline __ATTRS_o_ai void vec_xst(vector unsigned short __vec,
                                         signed long long __offset,
                                         unsigned short *__ptr) {
-  *(unaligned_vec_ushort *)(__ptr + __offset) = __vec;
+  signed char *__addr = (signed char *)__ptr + __offset;
+  *(unaligned_vec_ushort *)__addr = __vec;
 }
 
 static inline __ATTRS_o_ai void vec_xst(vector signed int __vec,
                                         signed long long __offset,
                                         signed int *__ptr) {
-  *(unaligned_vec_sint *)(__ptr + __offset) = __vec;
+  signed char *__addr = (signed char *)__ptr + __offset;
+  *(unaligned_vec_sint *)__addr = __vec;
 }
 
 static inline __ATTRS_o_ai void vec_xst(vector unsigned int __vec,
                                         signed long long __offset,
                                         unsigned int *__ptr) {
-  *(unaligned_vec_uint *)(__ptr + __offset) = __vec;
+  signed char *__addr = (signed char *)__ptr + __offset;
+  *(unaligned_vec_uint *)__addr = __vec;
 }
 
 static inline __ATTRS_o_ai void vec_xst(vector float __vec,
                                         signed long long __offset,
                                         float *__ptr) {
-  *(unaligned_vec_float *)(__ptr + __offset) = __vec;
+  signed char *__addr = (signed char *)__ptr + __offset;
+  *(unaligned_vec_float *)__addr = __vec;
 }
 
 #ifdef __VSX__
 static inline __ATTRS_o_ai void vec_xst(vector signed long long __vec,
                                         signed long long __offset,
                                         signed long long *__ptr) {
-  *(unaligned_vec_sll *)(__ptr + __offset) = __vec;
+  signed char *__addr = (signed char *)__ptr + __offset;
+  *(unaligned_vec_sll *)__addr = __vec;
 }
 
 static inline __ATTRS_o_ai void vec_xst(vector unsigned long long __vec,
                                         signed long long __offset,
                                         unsigned long long *__ptr) {
-  *(unaligned_vec_ull *)(__ptr + __offset) = __vec;
+  signed char *__addr = (signed char *)__ptr + __offset;
+  *(unaligned_vec_ull *)__addr = __vec;
 }
 
 static inline __ATTRS_o_ai void vec_xst(vector double __vec,
                                         signed long long __offset,
                                         double *__ptr) {
-  *(unaligned_vec_double *)(__ptr + __offset) = __vec;
+  signed char *__addr = (signed char *)__ptr + __offset;
+  *(unaligned_vec_double *)__addr = __vec;
 }
 #endif
 
@@ -16564,13 +16585,15 @@
 static inline __ATTRS_o_ai void vec_xst(vector signed __int128 __vec,
                                         signed long long __offset,
                                         signed __int128 *__ptr) {
-  *(unaligned_vec_si128 *)(__ptr + __offset) = __vec;
+  signed char *__addr = (signed char *)__ptr + __offset;
+  *(unaligned_vec_si128 *)__addr = __vec;
 }
 
 static inline __ATTRS_o_ai void vec_xst(vector unsigned __int128 __vec,
                                         signed long long __offset,
                                         unsigned __int128 *__ptr) {
-  *(unaligned_vec_ui128 *)(__ptr + __offset) = __vec;
+  signed char *__addr = (signed char *)__ptr + __offset;
+  *(unaligned_vec_ui128 *)__addr = __vec;
 }
 #endif
 
@@ -16583,7 +16606,8 @@
   vector signed char __tmp =
      __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
                              13, 12, 11, 10, 9, 8);
-  __builtin_vsx_stxvd2x_be(__tmp, __offset, __ptr);
+  typedef __attribute__((vector_size(sizeof(__tmp)))) double __vector_double;
+  __builtin_vsx_stxvd2x_be((__vector_double)__tmp, __offset, __ptr);
 }
 
 static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned char __vec,
@@ -16592,7 +16616,8 @@
   vector unsigned char __tmp =
      __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
                              13, 12, 11, 10, 9, 8);
-  __builtin_vsx_stxvd2x_be(__tmp, __offset, __ptr);
+  typedef __attribute__((vector_size(sizeof(__tmp)))) double __vector_double;
+  __builtin_vsx_stxvd2x_be((__vector_double)__tmp, __offset, __ptr);
 }
 
 static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed short __vec,
@@ -16600,7 +16625,8 @@
                                                signed short *__ptr) {
   vector signed short __tmp =
      __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
-  __builtin_vsx_stxvd2x_be(__tmp, __offset, __ptr);
+  typedef __attribute__((vector_size(sizeof(__tmp)))) double __vector_double;
+  __builtin_vsx_stxvd2x_be((__vector_double)__tmp, __offset, __ptr);
 }
 
 static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned short __vec,
@@ -16608,7 +16634,8 @@
                                                unsigned short *__ptr) {
   vector unsigned short __tmp =
      __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
-  __builtin_vsx_stxvd2x_be(__tmp, __offset, __ptr);
+  typedef __attribute__((vector_size(sizeof(__tmp)))) double __vector_double;
+  __builtin_vsx_stxvd2x_be((__vector_double)__tmp, __offset, __ptr);
 }
 
 static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed int __vec,
@@ -16620,32 +16647,32 @@
 static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned int __vec,
                                                signed long long  __offset,
                                                unsigned int *__ptr) {
-  __builtin_vsx_stxvw4x_be(__vec, __offset, __ptr);
+  __builtin_vsx_stxvw4x_be((vector int)__vec, __offset, __ptr);
 }
 
 static __inline__ void __ATTRS_o_ai vec_xst_be(vector float __vec,
                                                signed long long  __offset,
                                                float *__ptr) {
-  __builtin_vsx_stxvw4x_be(__vec, __offset, __ptr);
+  __builtin_vsx_stxvw4x_be((vector int)__vec, __offset, __ptr);
 }
 
 #ifdef __VSX__
 static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed long long __vec,
                                                signed long long  __offset,
                                                signed long long *__ptr) {
-  __builtin_vsx_stxvd2x_be(__vec, __offset, __ptr);
+  __builtin_vsx_stxvd2x_be((vector double)__vec, __offset, __ptr);
 }
 
 static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned long long __vec,
                                                signed long long  __offset,
                                                unsigned long long *__ptr) {
-  __builtin_vsx_stxvd2x_be(__vec, __offset, __ptr);
+  __builtin_vsx_stxvd2x_be((vector double)__vec, __offset, __ptr);
 }
 
 static __inline__ void __ATTRS_o_ai vec_xst_be(vector double __vec,
                                                signed long long  __offset,
                                                double *__ptr) {
-  __builtin_vsx_stxvd2x_be(__vec, __offset, __ptr);
+  __builtin_vsx_stxvd2x_be((vector double)__vec, __offset, __ptr);
 }
 #endif
 
@@ -16667,13 +16694,13 @@
 #endif
 
 #ifdef __POWER9_VECTOR__
-#define vec_test_data_class(__a, __b)                                      \
-        _Generic((__a),                                                    \
-           vector float:                                                   \
-             (vector bool int)__builtin_vsx_xvtstdcsp((__a), (__b)),       \
-           vector double:                                                  \
-             (vector bool long long)__builtin_vsx_xvtstdcdp((__a), (__b))  \
-        )
+#define vec_test_data_class(__a, __b)                                          \
+  _Generic(                                                                    \
+      (__a), vector float                                                      \
+      : (vector bool int)__builtin_vsx_xvtstdcsp((vector float)(__a), (__b)),  \
+        vector double                                                          \
+      : (vector bool long long)__builtin_vsx_xvtstdcdp((vector double)(__a),   \
+                                                       (__b)))
 
 #endif /* #ifdef __POWER9_VECTOR__ */
 
diff --git a/linux-x86/lib64/clang/10.0.1/include/ammintrin.h b/linux-x86/lib64/clang/10.0.5/include/ammintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/ammintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/ammintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/arm64intr.h b/linux-x86/lib64/clang/10.0.5/include/arm64intr.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/arm64intr.h
rename to linux-x86/lib64/clang/10.0.5/include/arm64intr.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/arm_acle.h b/linux-x86/lib64/clang/10.0.5/include/arm_acle.h
similarity index 95%
copy from darwin-x86/lib64/clang/10.0.1/include/arm_acle.h
copy to linux-x86/lib64/clang/10.0.5/include/arm_acle.h
index 0510e6f..596ea03 100644
--- a/darwin-x86/lib64/clang/10.0.1/include/arm_acle.h
+++ b/linux-x86/lib64/clang/10.0.5/include/arm_acle.h
@@ -90,9 +90,11 @@
 #endif
 
 /* 8.7 NOP */
+#if !defined(_MSC_VER) || !defined(__aarch64__)
 static __inline__ void __attribute__((__always_inline__, __nodebug__)) __nop(void) {
   __builtin_arm_nop();
 }
+#endif
 
 /* 9 DATA-PROCESSING INTRINSICS */
 /* 9.2 Miscellaneous data-processing intrinsics */
@@ -139,6 +141,26 @@
   return __builtin_clzll(__t);
 }
 
+/* CLS */
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__cls(uint32_t __t) {
+  return __builtin_arm_cls(__t);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__clsl(unsigned long __t) {
+#if __SIZEOF_LONG__ == 4
+  return __builtin_arm_cls(__t);
+#else
+  return __builtin_arm_cls64(__t);
+#endif
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__clsll(uint64_t __t) {
+  return __builtin_arm_cls64(__t);
+}
+
 /* REV */
 static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
 __rev(uint32_t __t) {
@@ -609,9 +631,13 @@
 #define __arm_rsr(sysreg) __builtin_arm_rsr(sysreg)
 #define __arm_rsr64(sysreg) __builtin_arm_rsr64(sysreg)
 #define __arm_rsrp(sysreg) __builtin_arm_rsrp(sysreg)
+#define __arm_rsrf(sysreg) __builtin_bit_cast(float, __arm_rsr(sysreg))
+#define __arm_rsrf64(sysreg) __builtin_bit_cast(double, __arm_rsr64(sysreg))
 #define __arm_wsr(sysreg, v) __builtin_arm_wsr(sysreg, v)
 #define __arm_wsr64(sysreg, v) __builtin_arm_wsr64(sysreg, v)
 #define __arm_wsrp(sysreg, v) __builtin_arm_wsrp(sysreg, v)
+#define __arm_wsrf(sysreg, v) __arm_wsr(sysreg, __builtin_bit_cast(uint32_t, v))
+#define __arm_wsrf64(sysreg, v) __arm_wsr64(sysreg, __builtin_bit_cast(uint64_t, v))
 
 /* Memory Tagging Extensions (MTE) Intrinsics */
 #if __ARM_FEATURE_MEMORY_TAGGING
diff --git a/darwin-x86/lib64/clang/10.0.1/include/arm_fp16.h b/linux-x86/lib64/clang/10.0.5/include/arm_fp16.h
similarity index 99%
copy from darwin-x86/lib64/clang/10.0.1/include/arm_fp16.h
copy to linux-x86/lib64/clang/10.0.5/include/arm_fp16.h
index 1c04f2a..71b18c1 100644
--- a/darwin-x86/lib64/clang/10.0.1/include/arm_fp16.h
+++ b/linux-x86/lib64/clang/10.0.5/include/arm_fp16.h
@@ -251,66 +251,66 @@
   __ret = (uint64_t) __builtin_neon_vcvtah_u64_f16(__s0); \
   __ret; \
 })
-__ai float16_t vcvth_f16_u32(uint32_t __p0) {
-  float16_t __ret;
-  __ret = (float16_t) __builtin_neon_vcvth_f16_u32(__p0);
-  return __ret;
-}
-__ai float16_t vcvth_f16_u64(uint64_t __p0) {
-  float16_t __ret;
-  __ret = (float16_t) __builtin_neon_vcvth_f16_u64(__p0);
-  return __ret;
-}
 __ai float16_t vcvth_f16_u16(uint16_t __p0) {
   float16_t __ret;
   __ret = (float16_t) __builtin_neon_vcvth_f16_u16(__p0);
   return __ret;
 }
+__ai float16_t vcvth_f16_s16(int16_t __p0) {
+  float16_t __ret;
+  __ret = (float16_t) __builtin_neon_vcvth_f16_s16(__p0);
+  return __ret;
+}
+__ai float16_t vcvth_f16_u32(uint32_t __p0) {
+  float16_t __ret;
+  __ret = (float16_t) __builtin_neon_vcvth_f16_u32(__p0);
+  return __ret;
+}
 __ai float16_t vcvth_f16_s32(int32_t __p0) {
   float16_t __ret;
   __ret = (float16_t) __builtin_neon_vcvth_f16_s32(__p0);
   return __ret;
 }
+__ai float16_t vcvth_f16_u64(uint64_t __p0) {
+  float16_t __ret;
+  __ret = (float16_t) __builtin_neon_vcvth_f16_u64(__p0);
+  return __ret;
+}
 __ai float16_t vcvth_f16_s64(int64_t __p0) {
   float16_t __ret;
   __ret = (float16_t) __builtin_neon_vcvth_f16_s64(__p0);
   return __ret;
 }
-__ai float16_t vcvth_f16_s16(int16_t __p0) {
-  float16_t __ret;
-  __ret = (float16_t) __builtin_neon_vcvth_f16_s16(__p0);
-  return __ret;
-}
 #define vcvth_n_f16_u32(__p0, __p1) __extension__ ({ \
   uint32_t __s0 = __p0; \
   float16_t __ret; \
   __ret = (float16_t) __builtin_neon_vcvth_n_f16_u32(__s0, __p1); \
   __ret; \
 })
-#define vcvth_n_f16_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vcvth_n_f16_u64(__s0, __p1); \
-  __ret; \
-})
-#define vcvth_n_f16_u16(__p0, __p1) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vcvth_n_f16_u16(__s0, __p1); \
-  __ret; \
-})
 #define vcvth_n_f16_s32(__p0, __p1) __extension__ ({ \
   int32_t __s0 = __p0; \
   float16_t __ret; \
   __ret = (float16_t) __builtin_neon_vcvth_n_f16_s32(__s0, __p1); \
   __ret; \
 })
+#define vcvth_n_f16_u64(__p0, __p1) __extension__ ({ \
+  uint64_t __s0 = __p0; \
+  float16_t __ret; \
+  __ret = (float16_t) __builtin_neon_vcvth_n_f16_u64(__s0, __p1); \
+  __ret; \
+})
 #define vcvth_n_f16_s64(__p0, __p1) __extension__ ({ \
   int64_t __s0 = __p0; \
   float16_t __ret; \
   __ret = (float16_t) __builtin_neon_vcvth_n_f16_s64(__s0, __p1); \
   __ret; \
 })
+#define vcvth_n_f16_u16(__p0, __p1) __extension__ ({ \
+  uint16_t __s0 = __p0; \
+  float16_t __ret; \
+  __ret = (float16_t) __builtin_neon_vcvth_n_f16_u16(__s0, __p1); \
+  __ret; \
+})
 #define vcvth_n_f16_s16(__p0, __p1) __extension__ ({ \
   int16_t __s0 = __p0; \
   float16_t __ret; \
diff --git a/linux-x86/lib64/clang/10.0.5/include/arm_mve.h b/linux-x86/lib64/clang/10.0.5/include/arm_mve.h
new file mode 100644
index 0000000..f7f4c4a
--- /dev/null
+++ b/linux-x86/lib64/clang/10.0.5/include/arm_mve.h
@@ -0,0 +1,4831 @@
+/*===---- arm_mve.h - ARM MVE intrinsics -----------------------------------===
+ *
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __ARM_MVE_H
+#define __ARM_MVE_H
+
+#if !__ARM_FEATURE_MVE
+#error "MVE support not enabled"
+#endif
+
+#include <stdint.h>
+
+typedef uint16_t mve_pred16_t;
+typedef __attribute__((neon_vector_type(8))) int16_t int16x8_t;
+typedef struct { int16x8_t val[2]; } int16x8x2_t;
+typedef struct { int16x8_t val[4]; } int16x8x4_t;
+typedef __attribute__((neon_vector_type(4))) int32_t int32x4_t;
+typedef struct { int32x4_t val[2]; } int32x4x2_t;
+typedef struct { int32x4_t val[4]; } int32x4x4_t;
+typedef __attribute__((neon_vector_type(2))) int64_t int64x2_t;
+typedef struct { int64x2_t val[2]; } int64x2x2_t;
+typedef struct { int64x2_t val[4]; } int64x2x4_t;
+typedef __attribute__((neon_vector_type(16))) int8_t int8x16_t;
+typedef struct { int8x16_t val[2]; } int8x16x2_t;
+typedef struct { int8x16_t val[4]; } int8x16x4_t;
+typedef __attribute__((neon_vector_type(8))) uint16_t uint16x8_t;
+typedef struct { uint16x8_t val[2]; } uint16x8x2_t;
+typedef struct { uint16x8_t val[4]; } uint16x8x4_t;
+typedef __attribute__((neon_vector_type(4))) uint32_t uint32x4_t;
+typedef struct { uint32x4_t val[2]; } uint32x4x2_t;
+typedef struct { uint32x4_t val[4]; } uint32x4x4_t;
+typedef __attribute__((neon_vector_type(2))) uint64_t uint64x2_t;
+typedef struct { uint64x2_t val[2]; } uint64x2x2_t;
+typedef struct { uint64x2_t val[4]; } uint64x2x4_t;
+typedef __attribute__((neon_vector_type(16))) uint8_t uint8x16_t;
+typedef struct { uint8x16_t val[2]; } uint8x16x2_t;
+typedef struct { uint8x16_t val[4]; } uint8x16x4_t;
+
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_asrl)))
+int64_t __arm_asrl(int64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_lsll)))
+uint64_t __arm_lsll(uint64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqrshr)))
+int32_t __arm_sqrshr(int32_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqrshrl)))
+int64_t __arm_sqrshrl(int64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqrshrl_sat48)))
+int64_t __arm_sqrshrl_sat48(int64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqshl)))
+int32_t __arm_sqshl(int32_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqshll)))
+int64_t __arm_sqshll(int64_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_srshr)))
+int32_t __arm_srshr(int32_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_srshrl)))
+int64_t __arm_srshrl(int64_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqrshl)))
+uint32_t __arm_uqrshl(uint32_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqrshll)))
+uint64_t __arm_uqrshll(uint64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqrshll_sat48)))
+uint64_t __arm_uqrshll_sat48(uint64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqshl)))
+uint32_t __arm_uqshl(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqshll)))
+uint64_t __arm_uqshll(uint64_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_urshr)))
+uint32_t __arm_urshr(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_urshrl)))
+uint64_t __arm_urshrl(uint64_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadciq_m_s32)))
+int32x4_t __arm_vadciq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadciq_m_s32)))
+int32x4_t __arm_vadciq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadciq_m_u32)))
+uint32x4_t __arm_vadciq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadciq_m_u32)))
+uint32x4_t __arm_vadciq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadciq_s32)))
+int32x4_t __arm_vadciq_s32(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadciq_s32)))
+int32x4_t __arm_vadciq(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadciq_u32)))
+uint32x4_t __arm_vadciq_u32(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadciq_u32)))
+uint32x4_t __arm_vadciq(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadcq_m_s32)))
+int32x4_t __arm_vadcq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadcq_m_s32)))
+int32x4_t __arm_vadcq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadcq_m_u32)))
+uint32x4_t __arm_vadcq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadcq_m_u32)))
+uint32x4_t __arm_vadcq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadcq_s32)))
+int32x4_t __arm_vadcq_s32(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadcq_s32)))
+int32x4_t __arm_vadcq(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadcq_u32)))
+uint32x4_t __arm_vadcq_u32(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadcq_u32)))
+uint32x4_t __arm_vadcq(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s16)))
+int16x8_t __arm_vaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s16)))
+int16x8_t __arm_vaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s32)))
+int32x4_t __arm_vaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s32)))
+int32x4_t __arm_vaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s8)))
+int8x16_t __arm_vaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s8)))
+int8x16_t __arm_vaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u16)))
+uint16x8_t __arm_vaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u16)))
+uint16x8_t __arm_vaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u32)))
+uint32x4_t __arm_vaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u32)))
+uint32x4_t __arm_vaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u8)))
+uint8x16_t __arm_vaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u8)))
+uint8x16_t __arm_vaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_s16)))
+int16x8_t __arm_vaddq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_s16)))
+int16x8_t __arm_vaddq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_s32)))
+int32x4_t __arm_vaddq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_s32)))
+int32x4_t __arm_vaddq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_s8)))
+int8x16_t __arm_vaddq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_s8)))
+int8x16_t __arm_vaddq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_u16)))
+uint16x8_t __arm_vaddq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_u16)))
+uint16x8_t __arm_vaddq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_u32)))
+uint32x4_t __arm_vaddq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_u32)))
+uint32x4_t __arm_vaddq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_u8)))
+uint8x16_t __arm_vaddq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_u8)))
+uint8x16_t __arm_vaddq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u16)))
+mve_pred16_t __arm_vcmpcsq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u16)))
+mve_pred16_t __arm_vcmpcsq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u32)))
+mve_pred16_t __arm_vcmpcsq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u32)))
+mve_pred16_t __arm_vcmpcsq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u8)))
+mve_pred16_t __arm_vcmpcsq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u8)))
+mve_pred16_t __arm_vcmpcsq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u16)))
+mve_pred16_t __arm_vcmpcsq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u16)))
+mve_pred16_t __arm_vcmpcsq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u32)))
+mve_pred16_t __arm_vcmpcsq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u32)))
+mve_pred16_t __arm_vcmpcsq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u8)))
+mve_pred16_t __arm_vcmpcsq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u8)))
+mve_pred16_t __arm_vcmpcsq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u16)))
+mve_pred16_t __arm_vcmpcsq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u16)))
+mve_pred16_t __arm_vcmpcsq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u32)))
+mve_pred16_t __arm_vcmpcsq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u32)))
+mve_pred16_t __arm_vcmpcsq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u8)))
+mve_pred16_t __arm_vcmpcsq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u8)))
+mve_pred16_t __arm_vcmpcsq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u16)))
+mve_pred16_t __arm_vcmpcsq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u16)))
+mve_pred16_t __arm_vcmpcsq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u32)))
+mve_pred16_t __arm_vcmpcsq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u32)))
+mve_pred16_t __arm_vcmpcsq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u8)))
+mve_pred16_t __arm_vcmpcsq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u8)))
+mve_pred16_t __arm_vcmpcsq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s16)))
+mve_pred16_t __arm_vcmpeqq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s16)))
+mve_pred16_t __arm_vcmpeqq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s32)))
+mve_pred16_t __arm_vcmpeqq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s32)))
+mve_pred16_t __arm_vcmpeqq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s8)))
+mve_pred16_t __arm_vcmpeqq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s8)))
+mve_pred16_t __arm_vcmpeqq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u16)))
+mve_pred16_t __arm_vcmpeqq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u16)))
+mve_pred16_t __arm_vcmpeqq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u32)))
+mve_pred16_t __arm_vcmpeqq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u32)))
+mve_pred16_t __arm_vcmpeqq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u8)))
+mve_pred16_t __arm_vcmpeqq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u8)))
+mve_pred16_t __arm_vcmpeqq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s16)))
+mve_pred16_t __arm_vcmpeqq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s16)))
+mve_pred16_t __arm_vcmpeqq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s32)))
+mve_pred16_t __arm_vcmpeqq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s32)))
+mve_pred16_t __arm_vcmpeqq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s8)))
+mve_pred16_t __arm_vcmpeqq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s8)))
+mve_pred16_t __arm_vcmpeqq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u16)))
+mve_pred16_t __arm_vcmpeqq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u16)))
+mve_pred16_t __arm_vcmpeqq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u32)))
+mve_pred16_t __arm_vcmpeqq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u32)))
+mve_pred16_t __arm_vcmpeqq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u8)))
+mve_pred16_t __arm_vcmpeqq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u8)))
+mve_pred16_t __arm_vcmpeqq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s16)))
+mve_pred16_t __arm_vcmpeqq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s16)))
+mve_pred16_t __arm_vcmpeqq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s32)))
+mve_pred16_t __arm_vcmpeqq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s32)))
+mve_pred16_t __arm_vcmpeqq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s8)))
+mve_pred16_t __arm_vcmpeqq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s8)))
+mve_pred16_t __arm_vcmpeqq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u16)))
+mve_pred16_t __arm_vcmpeqq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u16)))
+mve_pred16_t __arm_vcmpeqq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u32)))
+mve_pred16_t __arm_vcmpeqq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u32)))
+mve_pred16_t __arm_vcmpeqq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u8)))
+mve_pred16_t __arm_vcmpeqq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u8)))
+mve_pred16_t __arm_vcmpeqq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s16)))
+mve_pred16_t __arm_vcmpeqq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s16)))
+mve_pred16_t __arm_vcmpeqq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s32)))
+mve_pred16_t __arm_vcmpeqq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s32)))
+mve_pred16_t __arm_vcmpeqq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s8)))
+mve_pred16_t __arm_vcmpeqq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s8)))
+mve_pred16_t __arm_vcmpeqq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u16)))
+mve_pred16_t __arm_vcmpeqq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u16)))
+mve_pred16_t __arm_vcmpeqq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u32)))
+mve_pred16_t __arm_vcmpeqq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u32)))
+mve_pred16_t __arm_vcmpeqq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u8)))
+mve_pred16_t __arm_vcmpeqq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u8)))
+mve_pred16_t __arm_vcmpeqq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s16)))
+mve_pred16_t __arm_vcmpgeq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s16)))
+mve_pred16_t __arm_vcmpgeq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s32)))
+mve_pred16_t __arm_vcmpgeq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s32)))
+mve_pred16_t __arm_vcmpgeq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s8)))
+mve_pred16_t __arm_vcmpgeq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s8)))
+mve_pred16_t __arm_vcmpgeq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s16)))
+mve_pred16_t __arm_vcmpgeq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s16)))
+mve_pred16_t __arm_vcmpgeq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s32)))
+mve_pred16_t __arm_vcmpgeq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s32)))
+mve_pred16_t __arm_vcmpgeq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s8)))
+mve_pred16_t __arm_vcmpgeq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s8)))
+mve_pred16_t __arm_vcmpgeq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s16)))
+mve_pred16_t __arm_vcmpgeq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s16)))
+mve_pred16_t __arm_vcmpgeq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s32)))
+mve_pred16_t __arm_vcmpgeq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s32)))
+mve_pred16_t __arm_vcmpgeq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s8)))
+mve_pred16_t __arm_vcmpgeq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s8)))
+mve_pred16_t __arm_vcmpgeq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s16)))
+mve_pred16_t __arm_vcmpgeq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s16)))
+mve_pred16_t __arm_vcmpgeq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s32)))
+mve_pred16_t __arm_vcmpgeq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s32)))
+mve_pred16_t __arm_vcmpgeq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s8)))
+mve_pred16_t __arm_vcmpgeq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s8)))
+mve_pred16_t __arm_vcmpgeq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s16)))
+mve_pred16_t __arm_vcmpgtq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s16)))
+mve_pred16_t __arm_vcmpgtq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s32)))
+mve_pred16_t __arm_vcmpgtq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s32)))
+mve_pred16_t __arm_vcmpgtq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s8)))
+mve_pred16_t __arm_vcmpgtq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s8)))
+mve_pred16_t __arm_vcmpgtq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s16)))
+mve_pred16_t __arm_vcmpgtq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s16)))
+mve_pred16_t __arm_vcmpgtq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s32)))
+mve_pred16_t __arm_vcmpgtq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s32)))
+mve_pred16_t __arm_vcmpgtq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s8)))
+mve_pred16_t __arm_vcmpgtq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s8)))
+mve_pred16_t __arm_vcmpgtq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s16)))
+mve_pred16_t __arm_vcmpgtq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s16)))
+mve_pred16_t __arm_vcmpgtq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s32)))
+mve_pred16_t __arm_vcmpgtq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s32)))
+mve_pred16_t __arm_vcmpgtq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s8)))
+mve_pred16_t __arm_vcmpgtq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s8)))
+mve_pred16_t __arm_vcmpgtq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s16)))
+mve_pred16_t __arm_vcmpgtq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s16)))
+mve_pred16_t __arm_vcmpgtq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s32)))
+mve_pred16_t __arm_vcmpgtq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s32)))
+mve_pred16_t __arm_vcmpgtq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s8)))
+mve_pred16_t __arm_vcmpgtq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s8)))
+mve_pred16_t __arm_vcmpgtq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u16)))
+mve_pred16_t __arm_vcmphiq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u16)))
+mve_pred16_t __arm_vcmphiq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u32)))
+mve_pred16_t __arm_vcmphiq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u32)))
+mve_pred16_t __arm_vcmphiq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u8)))
+mve_pred16_t __arm_vcmphiq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u8)))
+mve_pred16_t __arm_vcmphiq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u16)))
+mve_pred16_t __arm_vcmphiq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u16)))
+mve_pred16_t __arm_vcmphiq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u32)))
+mve_pred16_t __arm_vcmphiq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u32)))
+mve_pred16_t __arm_vcmphiq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u8)))
+mve_pred16_t __arm_vcmphiq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u8)))
+mve_pred16_t __arm_vcmphiq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u16)))
+mve_pred16_t __arm_vcmphiq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u16)))
+mve_pred16_t __arm_vcmphiq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u32)))
+mve_pred16_t __arm_vcmphiq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u32)))
+mve_pred16_t __arm_vcmphiq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u8)))
+mve_pred16_t __arm_vcmphiq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u8)))
+mve_pred16_t __arm_vcmphiq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u16)))
+mve_pred16_t __arm_vcmphiq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u16)))
+mve_pred16_t __arm_vcmphiq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u32)))
+mve_pred16_t __arm_vcmphiq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u32)))
+mve_pred16_t __arm_vcmphiq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u8)))
+mve_pred16_t __arm_vcmphiq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u8)))
+mve_pred16_t __arm_vcmphiq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s16)))
+mve_pred16_t __arm_vcmpleq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s16)))
+mve_pred16_t __arm_vcmpleq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s32)))
+mve_pred16_t __arm_vcmpleq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s32)))
+mve_pred16_t __arm_vcmpleq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s8)))
+mve_pred16_t __arm_vcmpleq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s8)))
+mve_pred16_t __arm_vcmpleq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s16)))
+mve_pred16_t __arm_vcmpleq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s16)))
+mve_pred16_t __arm_vcmpleq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s32)))
+mve_pred16_t __arm_vcmpleq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s32)))
+mve_pred16_t __arm_vcmpleq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s8)))
+mve_pred16_t __arm_vcmpleq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s8)))
+mve_pred16_t __arm_vcmpleq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s16)))
+mve_pred16_t __arm_vcmpleq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s16)))
+mve_pred16_t __arm_vcmpleq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s32)))
+mve_pred16_t __arm_vcmpleq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s32)))
+mve_pred16_t __arm_vcmpleq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s8)))
+mve_pred16_t __arm_vcmpleq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s8)))
+mve_pred16_t __arm_vcmpleq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s16)))
+mve_pred16_t __arm_vcmpleq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s16)))
+mve_pred16_t __arm_vcmpleq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s32)))
+mve_pred16_t __arm_vcmpleq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s32)))
+mve_pred16_t __arm_vcmpleq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s8)))
+mve_pred16_t __arm_vcmpleq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s8)))
+mve_pred16_t __arm_vcmpleq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s16)))
+mve_pred16_t __arm_vcmpltq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s16)))
+mve_pred16_t __arm_vcmpltq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s32)))
+mve_pred16_t __arm_vcmpltq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s32)))
+mve_pred16_t __arm_vcmpltq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s8)))
+mve_pred16_t __arm_vcmpltq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s8)))
+mve_pred16_t __arm_vcmpltq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s16)))
+mve_pred16_t __arm_vcmpltq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s16)))
+mve_pred16_t __arm_vcmpltq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s32)))
+mve_pred16_t __arm_vcmpltq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s32)))
+mve_pred16_t __arm_vcmpltq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s8)))
+mve_pred16_t __arm_vcmpltq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s8)))
+mve_pred16_t __arm_vcmpltq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s16)))
+mve_pred16_t __arm_vcmpltq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s16)))
+mve_pred16_t __arm_vcmpltq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s32)))
+mve_pred16_t __arm_vcmpltq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s32)))
+mve_pred16_t __arm_vcmpltq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s8)))
+mve_pred16_t __arm_vcmpltq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s8)))
+mve_pred16_t __arm_vcmpltq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s16)))
+mve_pred16_t __arm_vcmpltq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s16)))
+mve_pred16_t __arm_vcmpltq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s32)))
+mve_pred16_t __arm_vcmpltq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s32)))
+mve_pred16_t __arm_vcmpltq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s8)))
+mve_pred16_t __arm_vcmpltq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s8)))
+mve_pred16_t __arm_vcmpltq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s16)))
+mve_pred16_t __arm_vcmpneq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s16)))
+mve_pred16_t __arm_vcmpneq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s32)))
+mve_pred16_t __arm_vcmpneq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s32)))
+mve_pred16_t __arm_vcmpneq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s8)))
+mve_pred16_t __arm_vcmpneq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s8)))
+mve_pred16_t __arm_vcmpneq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u16)))
+mve_pred16_t __arm_vcmpneq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u16)))
+mve_pred16_t __arm_vcmpneq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u32)))
+mve_pred16_t __arm_vcmpneq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u32)))
+mve_pred16_t __arm_vcmpneq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u8)))
+mve_pred16_t __arm_vcmpneq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u8)))
+mve_pred16_t __arm_vcmpneq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s16)))
+mve_pred16_t __arm_vcmpneq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s16)))
+mve_pred16_t __arm_vcmpneq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s32)))
+mve_pred16_t __arm_vcmpneq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s32)))
+mve_pred16_t __arm_vcmpneq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s8)))
+mve_pred16_t __arm_vcmpneq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s8)))
+mve_pred16_t __arm_vcmpneq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u16)))
+mve_pred16_t __arm_vcmpneq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u16)))
+mve_pred16_t __arm_vcmpneq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u32)))
+mve_pred16_t __arm_vcmpneq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u32)))
+mve_pred16_t __arm_vcmpneq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u8)))
+mve_pred16_t __arm_vcmpneq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u8)))
+mve_pred16_t __arm_vcmpneq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s16)))
+mve_pred16_t __arm_vcmpneq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s16)))
+mve_pred16_t __arm_vcmpneq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s32)))
+mve_pred16_t __arm_vcmpneq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s32)))
+mve_pred16_t __arm_vcmpneq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s8)))
+mve_pred16_t __arm_vcmpneq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s8)))
+mve_pred16_t __arm_vcmpneq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u16)))
+mve_pred16_t __arm_vcmpneq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u16)))
+mve_pred16_t __arm_vcmpneq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u32)))
+mve_pred16_t __arm_vcmpneq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u32)))
+mve_pred16_t __arm_vcmpneq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u8)))
+mve_pred16_t __arm_vcmpneq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u8)))
+mve_pred16_t __arm_vcmpneq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s16)))
+mve_pred16_t __arm_vcmpneq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s16)))
+mve_pred16_t __arm_vcmpneq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s32)))
+mve_pred16_t __arm_vcmpneq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s32)))
+mve_pred16_t __arm_vcmpneq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s8)))
+mve_pred16_t __arm_vcmpneq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s8)))
+mve_pred16_t __arm_vcmpneq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u16)))
+mve_pred16_t __arm_vcmpneq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u16)))
+mve_pred16_t __arm_vcmpneq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u32)))
+mve_pred16_t __arm_vcmpneq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u32)))
+mve_pred16_t __arm_vcmpneq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u8)))
+mve_pred16_t __arm_vcmpneq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u8)))
+mve_pred16_t __arm_vcmpneq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_s16)))
+int16x8_t __arm_vcreateq_s16(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_s32)))
+int32x4_t __arm_vcreateq_s32(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_s64)))
+int64x2_t __arm_vcreateq_s64(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_s8)))
+int8x16_t __arm_vcreateq_s8(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_u16)))
+uint16x8_t __arm_vcreateq_u16(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_u32)))
+uint32x4_t __arm_vcreateq_u32(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_u64)))
+uint64x2_t __arm_vcreateq_u64(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_u8)))
+uint8x16_t __arm_vcreateq_u8(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s16)))
+int16_t __arm_vgetq_lane_s16(int16x8_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s16)))
+int16_t __arm_vgetq_lane(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s32)))
+int32_t __arm_vgetq_lane_s32(int32x4_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s32)))
+int32_t __arm_vgetq_lane(int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s64)))
+int64_t __arm_vgetq_lane_s64(int64x2_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s64)))
+int64_t __arm_vgetq_lane(int64x2_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s8)))
+int8_t __arm_vgetq_lane_s8(int8x16_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s8)))
+int8_t __arm_vgetq_lane(int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u16)))
+uint16_t __arm_vgetq_lane_u16(uint16x8_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u16)))
+uint16_t __arm_vgetq_lane(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u32)))
+uint32_t __arm_vgetq_lane_u32(uint32x4_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u32)))
+uint32_t __arm_vgetq_lane(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u64)))
+uint64_t __arm_vgetq_lane_u64(uint64x2_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u64)))
+uint64_t __arm_vgetq_lane(uint64x2_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u8)))
+uint8_t __arm_vgetq_lane_u8(uint8x16_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u8)))
+uint8_t __arm_vgetq_lane(uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_s16)))
+int16x8_t __arm_vld1q_s16(const int16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_s16)))
+int16x8_t __arm_vld1q(const int16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_s32)))
+int32x4_t __arm_vld1q_s32(const int32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_s32)))
+int32x4_t __arm_vld1q(const int32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_s8)))
+int8x16_t __arm_vld1q_s8(const int8_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_s8)))
+int8x16_t __arm_vld1q(const int8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_u16)))
+uint16x8_t __arm_vld1q_u16(const uint16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_u16)))
+uint16x8_t __arm_vld1q(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_u32)))
+uint32x4_t __arm_vld1q_u32(const uint32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_u32)))
+uint32x4_t __arm_vld1q(const uint32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_u8)))
+uint8x16_t __arm_vld1q_u8(const uint8_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_u8)))
+uint8x16_t __arm_vld1q(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s16)))
+int16x8_t __arm_vld1q_z_s16(const int16_t *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s16)))
+int16x8_t __arm_vld1q_z(const int16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s32)))
+int32x4_t __arm_vld1q_z_s32(const int32_t *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s32)))
+int32x4_t __arm_vld1q_z(const int32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s8)))
+int8x16_t __arm_vld1q_z_s8(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s8)))
+int8x16_t __arm_vld1q_z(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u16)))
+uint16x8_t __arm_vld1q_z_u16(const uint16_t *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u16)))
+uint16x8_t __arm_vld1q_z(const uint16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u32)))
+uint32x4_t __arm_vld1q_z_u32(const uint32_t *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u32)))
+uint32x4_t __arm_vld1q_z(const uint32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u8)))
+uint8x16_t __arm_vld1q_z_u8(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u8)))
+uint8x16_t __arm_vld1q_z(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_s16)))
+int16x8x2_t __arm_vld2q_s16(const int16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_s16)))
+int16x8x2_t __arm_vld2q(const int16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_s32)))
+int32x4x2_t __arm_vld2q_s32(const int32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_s32)))
+int32x4x2_t __arm_vld2q(const int32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_s8)))
+int8x16x2_t __arm_vld2q_s8(const int8_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_s8)))
+int8x16x2_t __arm_vld2q(const int8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_u16)))
+uint16x8x2_t __arm_vld2q_u16(const uint16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_u16)))
+uint16x8x2_t __arm_vld2q(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_u32)))
+uint32x4x2_t __arm_vld2q_u32(const uint32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_u32)))
+uint32x4x2_t __arm_vld2q(const uint32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_u8)))
+uint8x16x2_t __arm_vld2q_u8(const uint8_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_u8)))
+uint8x16x2_t __arm_vld2q(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_s16)))
+int16x8x4_t __arm_vld4q_s16(const int16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_s16)))
+int16x8x4_t __arm_vld4q(const int16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_s32)))
+int32x4x4_t __arm_vld4q_s32(const int32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_s32)))
+int32x4x4_t __arm_vld4q(const int32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_s8)))
+int8x16x4_t __arm_vld4q_s8(const int8_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_s8)))
+int8x16x4_t __arm_vld4q(const int8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_u16)))
+uint16x8x4_t __arm_vld4q_u16(const uint16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_u16)))
+uint16x8x4_t __arm_vld4q(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_u32)))
+uint32x4x4_t __arm_vld4q_u32(const uint32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_u32)))
+uint32x4x4_t __arm_vld4q(const uint32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_u8)))
+uint8x16x4_t __arm_vld4q_u8(const uint8_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_u8)))
+uint8x16x4_t __arm_vld4q(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s16)))
+int16x8_t __arm_vldrbq_gather_offset_s16(const int8_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s16)))
+int16x8_t __arm_vldrbq_gather_offset(const int8_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s32)))
+int32x4_t __arm_vldrbq_gather_offset_s32(const int8_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s32)))
+int32x4_t __arm_vldrbq_gather_offset(const int8_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s8)))
+int8x16_t __arm_vldrbq_gather_offset_s8(const int8_t *, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s8)))
+int8x16_t __arm_vldrbq_gather_offset(const int8_t *, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u16)))
+uint16x8_t __arm_vldrbq_gather_offset_u16(const uint8_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u16)))
+uint16x8_t __arm_vldrbq_gather_offset(const uint8_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u32)))
+uint32x4_t __arm_vldrbq_gather_offset_u32(const uint8_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u32)))
+uint32x4_t __arm_vldrbq_gather_offset(const uint8_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u8)))
+uint8x16_t __arm_vldrbq_gather_offset_u8(const uint8_t *, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u8)))
+uint8x16_t __arm_vldrbq_gather_offset(const uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16)))
+int16x8_t __arm_vldrbq_gather_offset_z_s16(const int8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16)))
+int16x8_t __arm_vldrbq_gather_offset_z(const int8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32)))
+int32x4_t __arm_vldrbq_gather_offset_z_s32(const int8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32)))
+int32x4_t __arm_vldrbq_gather_offset_z(const int8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8)))
+int8x16_t __arm_vldrbq_gather_offset_z_s8(const int8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8)))
+int8x16_t __arm_vldrbq_gather_offset_z(const int8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16)))
+uint16x8_t __arm_vldrbq_gather_offset_z_u16(const uint8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16)))
+uint16x8_t __arm_vldrbq_gather_offset_z(const uint8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32)))
+uint32x4_t __arm_vldrbq_gather_offset_z_u32(const uint8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32)))
+uint32x4_t __arm_vldrbq_gather_offset_z(const uint8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8)))
+uint8x16_t __arm_vldrbq_gather_offset_z_u8(const uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8)))
+uint8x16_t __arm_vldrbq_gather_offset_z(const uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_s16)))
+int16x8_t __arm_vldrbq_s16(const int8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_s32)))
+int32x4_t __arm_vldrbq_s32(const int8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_s8)))
+int8x16_t __arm_vldrbq_s8(const int8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_u16)))
+uint16x8_t __arm_vldrbq_u16(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_u32)))
+uint32x4_t __arm_vldrbq_u32(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_u8)))
+uint8x16_t __arm_vldrbq_u8(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_s16)))
+int16x8_t __arm_vldrbq_z_s16(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_s32)))
+int32x4_t __arm_vldrbq_z_s32(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_s8)))
+int8x16_t __arm_vldrbq_z_s8(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_u16)))
+uint16x8_t __arm_vldrbq_z_u16(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_u32)))
+uint32x4_t __arm_vldrbq_z_u32(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_u8)))
+uint8x16_t __arm_vldrbq_z_u8(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_s64)))
+int64x2_t __arm_vldrdq_gather_base_s64(uint64x2_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_u64)))
+uint64x2_t __arm_vldrdq_gather_base_u64(uint64x2_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_wb_s64)))
+int64x2_t __arm_vldrdq_gather_base_wb_s64(uint64x2_t *, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_wb_u64)))
+uint64x2_t __arm_vldrdq_gather_base_wb_u64(uint64x2_t *, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_s64)))
+int64x2_t __arm_vldrdq_gather_base_wb_z_s64(uint64x2_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_u64)))
+uint64x2_t __arm_vldrdq_gather_base_wb_z_u64(uint64x2_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_z_s64)))
+int64x2_t __arm_vldrdq_gather_base_z_s64(uint64x2_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_z_u64)))
+uint64x2_t __arm_vldrdq_gather_base_z_u64(uint64x2_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_s64)))
+int64x2_t __arm_vldrdq_gather_offset_s64(const int64_t *, uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_s64)))
+int64x2_t __arm_vldrdq_gather_offset(const int64_t *, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_u64)))
+uint64x2_t __arm_vldrdq_gather_offset_u64(const uint64_t *, uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_u64)))
+uint64x2_t __arm_vldrdq_gather_offset(const uint64_t *, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64)))
+int64x2_t __arm_vldrdq_gather_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64)))
+int64x2_t __arm_vldrdq_gather_offset_z(const int64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64)))
+uint64x2_t __arm_vldrdq_gather_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64)))
+uint64x2_t __arm_vldrdq_gather_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64)))
+int64x2_t __arm_vldrdq_gather_shifted_offset_s64(const int64_t *, uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64)))
+int64x2_t __arm_vldrdq_gather_shifted_offset(const int64_t *, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64)))
+uint64x2_t __arm_vldrdq_gather_shifted_offset_u64(const uint64_t *, uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64)))
+uint64x2_t __arm_vldrdq_gather_shifted_offset(const uint64_t *, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64)))
+int64x2_t __arm_vldrdq_gather_shifted_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64)))
+int64x2_t __arm_vldrdq_gather_shifted_offset_z(const int64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64)))
+uint64x2_t __arm_vldrdq_gather_shifted_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64)))
+uint64x2_t __arm_vldrdq_gather_shifted_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_s16)))
+int16x8_t __arm_vldrhq_gather_offset_s16(const int16_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_s16)))
+int16x8_t __arm_vldrhq_gather_offset(const int16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_s32)))
+int32x4_t __arm_vldrhq_gather_offset_s32(const int16_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_s32)))
+int32x4_t __arm_vldrhq_gather_offset(const int16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_u16)))
+uint16x8_t __arm_vldrhq_gather_offset_u16(const uint16_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_u16)))
+uint16x8_t __arm_vldrhq_gather_offset(const uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_u32)))
+uint32x4_t __arm_vldrhq_gather_offset_u32(const uint16_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_u32)))
+uint32x4_t __arm_vldrhq_gather_offset(const uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16)))
+int16x8_t __arm_vldrhq_gather_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16)))
+int16x8_t __arm_vldrhq_gather_offset_z(const int16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32)))
+int32x4_t __arm_vldrhq_gather_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32)))
+int32x4_t __arm_vldrhq_gather_offset_z(const int16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16)))
+uint16x8_t __arm_vldrhq_gather_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16)))
+uint16x8_t __arm_vldrhq_gather_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32)))
+uint32x4_t __arm_vldrhq_gather_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32)))
+uint32x4_t __arm_vldrhq_gather_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16)))
+int16x8_t __arm_vldrhq_gather_shifted_offset_s16(const int16_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16)))
+int16x8_t __arm_vldrhq_gather_shifted_offset(const int16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32)))
+int32x4_t __arm_vldrhq_gather_shifted_offset_s32(const int16_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32)))
+int32x4_t __arm_vldrhq_gather_shifted_offset(const int16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16)))
+uint16x8_t __arm_vldrhq_gather_shifted_offset_u16(const uint16_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16)))
+uint16x8_t __arm_vldrhq_gather_shifted_offset(const uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32)))
+uint32x4_t __arm_vldrhq_gather_shifted_offset_u32(const uint16_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32)))
+uint32x4_t __arm_vldrhq_gather_shifted_offset(const uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16)))
+int16x8_t __arm_vldrhq_gather_shifted_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16)))
+int16x8_t __arm_vldrhq_gather_shifted_offset_z(const int16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32)))
+int32x4_t __arm_vldrhq_gather_shifted_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32)))
+int32x4_t __arm_vldrhq_gather_shifted_offset_z(const int16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16)))
+uint16x8_t __arm_vldrhq_gather_shifted_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16)))
+uint16x8_t __arm_vldrhq_gather_shifted_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32)))
+uint32x4_t __arm_vldrhq_gather_shifted_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32)))
+uint32x4_t __arm_vldrhq_gather_shifted_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_s16)))
+int16x8_t __arm_vldrhq_s16(const int16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_s32)))
+int32x4_t __arm_vldrhq_s32(const int16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_u16)))
+uint16x8_t __arm_vldrhq_u16(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_u32)))
+uint32x4_t __arm_vldrhq_u32(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_s16)))
+int16x8_t __arm_vldrhq_z_s16(const int16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_s32)))
+int32x4_t __arm_vldrhq_z_s32(const int16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_u16)))
+uint16x8_t __arm_vldrhq_z_u16(const uint16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_u32)))
+uint32x4_t __arm_vldrhq_z_u32(const uint16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_s32)))
+int32x4_t __arm_vldrwq_gather_base_s32(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_u32)))
+uint32x4_t __arm_vldrwq_gather_base_u32(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_s32)))
+int32x4_t __arm_vldrwq_gather_base_wb_s32(uint32x4_t *, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_u32)))
+uint32x4_t __arm_vldrwq_gather_base_wb_u32(uint32x4_t *, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_s32)))
+int32x4_t __arm_vldrwq_gather_base_wb_z_s32(uint32x4_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_u32)))
+uint32x4_t __arm_vldrwq_gather_base_wb_z_u32(uint32x4_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_z_s32)))
+int32x4_t __arm_vldrwq_gather_base_z_s32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_z_u32)))
+uint32x4_t __arm_vldrwq_gather_base_z_u32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_s32)))
+int32x4_t __arm_vldrwq_gather_offset_s32(const int32_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_s32)))
+int32x4_t __arm_vldrwq_gather_offset(const int32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_u32)))
+uint32x4_t __arm_vldrwq_gather_offset_u32(const uint32_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_u32)))
+uint32x4_t __arm_vldrwq_gather_offset(const uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32)))
+int32x4_t __arm_vldrwq_gather_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32)))
+int32x4_t __arm_vldrwq_gather_offset_z(const int32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32)))
+uint32x4_t __arm_vldrwq_gather_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32)))
+uint32x4_t __arm_vldrwq_gather_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32)))
+int32x4_t __arm_vldrwq_gather_shifted_offset_s32(const int32_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32)))
+int32x4_t __arm_vldrwq_gather_shifted_offset(const int32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32)))
+uint32x4_t __arm_vldrwq_gather_shifted_offset_u32(const uint32_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32)))
+uint32x4_t __arm_vldrwq_gather_shifted_offset(const uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32)))
+int32x4_t __arm_vldrwq_gather_shifted_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32)))
+int32x4_t __arm_vldrwq_gather_shifted_offset_z(const int32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32)))
+uint32x4_t __arm_vldrwq_gather_shifted_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32)))
+uint32x4_t __arm_vldrwq_gather_shifted_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_s32)))
+int32x4_t __arm_vldrwq_s32(const int32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_u32)))
+uint32x4_t __arm_vldrwq_u32(const uint32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_z_s32)))
+int32x4_t __arm_vldrwq_z_s32(const int32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_z_u32)))
+uint32x4_t __arm_vldrwq_z_u32(const uint32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s16)))
+int16_t __arm_vmaxvq_s16(int16_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s16)))
+int16_t __arm_vmaxvq(int16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s32)))
+int32_t __arm_vmaxvq_s32(int32_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s32)))
+int32_t __arm_vmaxvq(int32_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s8)))
+int8_t __arm_vmaxvq_s8(int8_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s8)))
+int8_t __arm_vmaxvq(int8_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u16)))
+uint16_t __arm_vmaxvq_u16(uint16_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u16)))
+uint16_t __arm_vmaxvq(uint16_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u32)))
+uint32_t __arm_vmaxvq_u32(uint32_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u32)))
+uint32_t __arm_vmaxvq(uint32_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u8)))
+uint8_t __arm_vmaxvq_u8(uint8_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u8)))
+uint8_t __arm_vmaxvq(uint8_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_s16)))
+int16_t __arm_vminvq_s16(int16_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_s16)))
+int16_t __arm_vminvq(int16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_s32)))
+int32_t __arm_vminvq_s32(int32_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_s32)))
+int32_t __arm_vminvq(int32_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_s8)))
+int8_t __arm_vminvq_s8(int8_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_s8)))
+int8_t __arm_vminvq(int8_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_u16)))
+uint16_t __arm_vminvq_u16(uint16_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_u16)))
+uint16_t __arm_vminvq(uint16_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_u32)))
+uint32_t __arm_vminvq_u32(uint32_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_u32)))
+uint32_t __arm_vminvq(uint32_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_u8)))
+uint8_t __arm_vminvq_u8(uint8_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_u8)))
+uint8_t __arm_vminvq(uint8_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s32)))
+int16x8_t __arm_vreinterpretq_s16_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s32)))
+int16x8_t __arm_vreinterpretq_s16(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s64)))
+int16x8_t __arm_vreinterpretq_s16_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s64)))
+int16x8_t __arm_vreinterpretq_s16(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s8)))
+int16x8_t __arm_vreinterpretq_s16_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s8)))
+int16x8_t __arm_vreinterpretq_s16(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u16)))
+int16x8_t __arm_vreinterpretq_s16_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u16)))
+int16x8_t __arm_vreinterpretq_s16(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u32)))
+int16x8_t __arm_vreinterpretq_s16_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u32)))
+int16x8_t __arm_vreinterpretq_s16(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u64)))
+int16x8_t __arm_vreinterpretq_s16_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u64)))
+int16x8_t __arm_vreinterpretq_s16(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u8)))
+int16x8_t __arm_vreinterpretq_s16_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u8)))
+int16x8_t __arm_vreinterpretq_s16(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s16)))
+int32x4_t __arm_vreinterpretq_s32_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s16)))
+int32x4_t __arm_vreinterpretq_s32(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s64)))
+int32x4_t __arm_vreinterpretq_s32_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s64)))
+int32x4_t __arm_vreinterpretq_s32(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s8)))
+int32x4_t __arm_vreinterpretq_s32_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s8)))
+int32x4_t __arm_vreinterpretq_s32(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u16)))
+int32x4_t __arm_vreinterpretq_s32_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u16)))
+int32x4_t __arm_vreinterpretq_s32(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u32)))
+int32x4_t __arm_vreinterpretq_s32_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u32)))
+int32x4_t __arm_vreinterpretq_s32(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u64)))
+int32x4_t __arm_vreinterpretq_s32_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u64)))
+int32x4_t __arm_vreinterpretq_s32(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u8)))
+int32x4_t __arm_vreinterpretq_s32_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u8)))
+int32x4_t __arm_vreinterpretq_s32(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s16)))
+int64x2_t __arm_vreinterpretq_s64_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s16)))
+int64x2_t __arm_vreinterpretq_s64(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s32)))
+int64x2_t __arm_vreinterpretq_s64_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s32)))
+int64x2_t __arm_vreinterpretq_s64(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s8)))
+int64x2_t __arm_vreinterpretq_s64_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s8)))
+int64x2_t __arm_vreinterpretq_s64(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u16)))
+int64x2_t __arm_vreinterpretq_s64_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u16)))
+int64x2_t __arm_vreinterpretq_s64(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u32)))
+int64x2_t __arm_vreinterpretq_s64_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u32)))
+int64x2_t __arm_vreinterpretq_s64(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u64)))
+int64x2_t __arm_vreinterpretq_s64_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u64)))
+int64x2_t __arm_vreinterpretq_s64(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u8)))
+int64x2_t __arm_vreinterpretq_s64_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u8)))
+int64x2_t __arm_vreinterpretq_s64(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s16)))
+int8x16_t __arm_vreinterpretq_s8_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s16)))
+int8x16_t __arm_vreinterpretq_s8(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s32)))
+int8x16_t __arm_vreinterpretq_s8_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s32)))
+int8x16_t __arm_vreinterpretq_s8(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s64)))
+int8x16_t __arm_vreinterpretq_s8_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s64)))
+int8x16_t __arm_vreinterpretq_s8(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u16)))
+int8x16_t __arm_vreinterpretq_s8_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u16)))
+int8x16_t __arm_vreinterpretq_s8(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u32)))
+int8x16_t __arm_vreinterpretq_s8_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u32)))
+int8x16_t __arm_vreinterpretq_s8(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u64)))
+int8x16_t __arm_vreinterpretq_s8_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u64)))
+int8x16_t __arm_vreinterpretq_s8(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u8)))
+int8x16_t __arm_vreinterpretq_s8_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u8)))
+int8x16_t __arm_vreinterpretq_s8(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s16)))
+uint16x8_t __arm_vreinterpretq_u16_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s16)))
+uint16x8_t __arm_vreinterpretq_u16(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s32)))
+uint16x8_t __arm_vreinterpretq_u16_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s32)))
+uint16x8_t __arm_vreinterpretq_u16(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s64)))
+uint16x8_t __arm_vreinterpretq_u16_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s64)))
+uint16x8_t __arm_vreinterpretq_u16(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s8)))
+uint16x8_t __arm_vreinterpretq_u16_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s8)))
+uint16x8_t __arm_vreinterpretq_u16(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u32)))
+uint16x8_t __arm_vreinterpretq_u16_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u32)))
+uint16x8_t __arm_vreinterpretq_u16(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u64)))
+uint16x8_t __arm_vreinterpretq_u16_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u64)))
+uint16x8_t __arm_vreinterpretq_u16(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u8)))
+uint16x8_t __arm_vreinterpretq_u16_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u8)))
+uint16x8_t __arm_vreinterpretq_u16(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s16)))
+uint32x4_t __arm_vreinterpretq_u32_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s16)))
+uint32x4_t __arm_vreinterpretq_u32(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s32)))
+uint32x4_t __arm_vreinterpretq_u32_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s32)))
+uint32x4_t __arm_vreinterpretq_u32(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s64)))
+uint32x4_t __arm_vreinterpretq_u32_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s64)))
+uint32x4_t __arm_vreinterpretq_u32(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s8)))
+uint32x4_t __arm_vreinterpretq_u32_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s8)))
+uint32x4_t __arm_vreinterpretq_u32(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u16)))
+uint32x4_t __arm_vreinterpretq_u32_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u16)))
+uint32x4_t __arm_vreinterpretq_u32(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u64)))
+uint32x4_t __arm_vreinterpretq_u32_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u64)))
+uint32x4_t __arm_vreinterpretq_u32(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u8)))
+uint32x4_t __arm_vreinterpretq_u32_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u8)))
+uint32x4_t __arm_vreinterpretq_u32(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s16)))
+uint64x2_t __arm_vreinterpretq_u64_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s16)))
+uint64x2_t __arm_vreinterpretq_u64(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s32)))
+uint64x2_t __arm_vreinterpretq_u64_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s32)))
+uint64x2_t __arm_vreinterpretq_u64(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s64)))
+uint64x2_t __arm_vreinterpretq_u64_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s64)))
+uint64x2_t __arm_vreinterpretq_u64(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s8)))
+uint64x2_t __arm_vreinterpretq_u64_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s8)))
+uint64x2_t __arm_vreinterpretq_u64(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u16)))
+uint64x2_t __arm_vreinterpretq_u64_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u16)))
+uint64x2_t __arm_vreinterpretq_u64(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u32)))
+uint64x2_t __arm_vreinterpretq_u64_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u32)))
+uint64x2_t __arm_vreinterpretq_u64(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u8)))
+uint64x2_t __arm_vreinterpretq_u64_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u8)))
+uint64x2_t __arm_vreinterpretq_u64(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s16)))
+uint8x16_t __arm_vreinterpretq_u8_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s16)))
+uint8x16_t __arm_vreinterpretq_u8(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s32)))
+uint8x16_t __arm_vreinterpretq_u8_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s32)))
+uint8x16_t __arm_vreinterpretq_u8(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s64)))
+uint8x16_t __arm_vreinterpretq_u8_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s64)))
+uint8x16_t __arm_vreinterpretq_u8(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s8)))
+uint8x16_t __arm_vreinterpretq_u8_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s8)))
+uint8x16_t __arm_vreinterpretq_u8(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u16)))
+uint8x16_t __arm_vreinterpretq_u8_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u16)))
+uint8x16_t __arm_vreinterpretq_u8(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u32)))
+uint8x16_t __arm_vreinterpretq_u8_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u32)))
+uint8x16_t __arm_vreinterpretq_u8(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u64)))
+uint8x16_t __arm_vreinterpretq_u8_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u64)))
+uint8x16_t __arm_vreinterpretq_u8(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s16)))
+int16x8_t __arm_vsetq_lane_s16(int16_t, int16x8_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s16)))
+int16x8_t __arm_vsetq_lane(int16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s32)))
+int32x4_t __arm_vsetq_lane_s32(int32_t, int32x4_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s32)))
+int32x4_t __arm_vsetq_lane(int32_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s64)))
+int64x2_t __arm_vsetq_lane_s64(int64_t, int64x2_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s64)))
+int64x2_t __arm_vsetq_lane(int64_t, int64x2_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s8)))
+int8x16_t __arm_vsetq_lane_s8(int8_t, int8x16_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s8)))
+int8x16_t __arm_vsetq_lane(int8_t, int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u16)))
+uint16x8_t __arm_vsetq_lane_u16(uint16_t, uint16x8_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u16)))
+uint16x8_t __arm_vsetq_lane(uint16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u32)))
+uint32x4_t __arm_vsetq_lane_u32(uint32_t, uint32x4_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u32)))
+uint32x4_t __arm_vsetq_lane(uint32_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u64)))
+uint64x2_t __arm_vsetq_lane_u64(uint64_t, uint64x2_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u64)))
+uint64x2_t __arm_vsetq_lane(uint64_t, uint64x2_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u8)))
+uint8x16_t __arm_vsetq_lane_u8(uint8_t, uint8x16_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u8)))
+uint8x16_t __arm_vsetq_lane(uint8_t, uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s16)))
+void __arm_vst1q_p_s16(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s16)))
+void __arm_vst1q_p(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s32)))
+void __arm_vst1q_p_s32(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s32)))
+void __arm_vst1q_p(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s8)))
+void __arm_vst1q_p_s8(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s8)))
+void __arm_vst1q_p(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u16)))
+void __arm_vst1q_p_u16(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u16)))
+void __arm_vst1q_p(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u32)))
+void __arm_vst1q_p_u32(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u32)))
+void __arm_vst1q_p(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u8)))
+void __arm_vst1q_p_u8(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u8)))
+void __arm_vst1q_p(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_s16)))
+void __arm_vst1q_s16(int16_t *, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_s16)))
+void __arm_vst1q(int16_t *, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_s32)))
+void __arm_vst1q_s32(int32_t *, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_s32)))
+void __arm_vst1q(int32_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_s8)))
+void __arm_vst1q_s8(int8_t *, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_s8)))
+void __arm_vst1q(int8_t *, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_u16)))
+void __arm_vst1q_u16(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_u16)))
+void __arm_vst1q(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_u32)))
+void __arm_vst1q_u32(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_u32)))
+void __arm_vst1q(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_u8)))
+void __arm_vst1q_u8(uint8_t *, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_u8)))
+void __arm_vst1q(uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_s16)))
+void __arm_vst2q_s16(int16_t *, int16x8x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_s16)))
+void __arm_vst2q(int16_t *, int16x8x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_s32)))
+void __arm_vst2q_s32(int32_t *, int32x4x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_s32)))
+void __arm_vst2q(int32_t *, int32x4x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_s8)))
+void __arm_vst2q_s8(int8_t *, int8x16x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_s8)))
+void __arm_vst2q(int8_t *, int8x16x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_u16)))
+void __arm_vst2q_u16(uint16_t *, uint16x8x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_u16)))
+void __arm_vst2q(uint16_t *, uint16x8x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_u32)))
+void __arm_vst2q_u32(uint32_t *, uint32x4x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_u32)))
+void __arm_vst2q(uint32_t *, uint32x4x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_u8)))
+void __arm_vst2q_u8(uint8_t *, uint8x16x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_u8)))
+void __arm_vst2q(uint8_t *, uint8x16x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_s16)))
+void __arm_vst4q_s16(int16_t *, int16x8x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_s16)))
+void __arm_vst4q(int16_t *, int16x8x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_s32)))
+void __arm_vst4q_s32(int32_t *, int32x4x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_s32)))
+void __arm_vst4q(int32_t *, int32x4x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_s8)))
+void __arm_vst4q_s8(int8_t *, int8x16x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_s8)))
+void __arm_vst4q(int8_t *, int8x16x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_u16)))
+void __arm_vst4q_u16(uint16_t *, uint16x8x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_u16)))
+void __arm_vst4q(uint16_t *, uint16x8x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_u32)))
+void __arm_vst4q_u32(uint32_t *, uint32x4x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_u32)))
+void __arm_vst4q(uint32_t *, uint32x4x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_u8)))
+void __arm_vst4q_u8(uint8_t *, uint8x16x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_u8)))
+void __arm_vst4q(uint8_t *, uint8x16x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s16)))
+void __arm_vstrbq_p_s16(int8_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s16)))
+void __arm_vstrbq_p(int8_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s32)))
+void __arm_vstrbq_p_s32(int8_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s32)))
+void __arm_vstrbq_p(int8_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s8)))
+void __arm_vstrbq_p_s8(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s8)))
+void __arm_vstrbq_p(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u16)))
+void __arm_vstrbq_p_u16(uint8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u16)))
+void __arm_vstrbq_p(uint8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u32)))
+void __arm_vstrbq_p_u32(uint8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u32)))
+void __arm_vstrbq_p(uint8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u8)))
+void __arm_vstrbq_p_u8(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u8)))
+void __arm_vstrbq_p(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s16)))
+void __arm_vstrbq_s16(int8_t *, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s16)))
+void __arm_vstrbq(int8_t *, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s32)))
+void __arm_vstrbq_s32(int8_t *, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s32)))
+void __arm_vstrbq(int8_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s8)))
+void __arm_vstrbq_s8(int8_t *, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s8)))
+void __arm_vstrbq(int8_t *, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16)))
+void __arm_vstrbq_scatter_offset_p_s16(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16)))
+void __arm_vstrbq_scatter_offset_p(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32)))
+void __arm_vstrbq_scatter_offset_p_s32(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32)))
+void __arm_vstrbq_scatter_offset_p(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8)))
+void __arm_vstrbq_scatter_offset_p_s8(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8)))
+void __arm_vstrbq_scatter_offset_p(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16)))
+void __arm_vstrbq_scatter_offset_p_u16(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16)))
+void __arm_vstrbq_scatter_offset_p(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32)))
+void __arm_vstrbq_scatter_offset_p_u32(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32)))
+void __arm_vstrbq_scatter_offset_p(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8)))
+void __arm_vstrbq_scatter_offset_p_u8(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8)))
+void __arm_vstrbq_scatter_offset_p(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16)))
+void __arm_vstrbq_scatter_offset_s16(int8_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16)))
+void __arm_vstrbq_scatter_offset(int8_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32)))
+void __arm_vstrbq_scatter_offset_s32(int8_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32)))
+void __arm_vstrbq_scatter_offset(int8_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8)))
+void __arm_vstrbq_scatter_offset_s8(int8_t *, uint8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8)))
+void __arm_vstrbq_scatter_offset(int8_t *, uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16)))
+void __arm_vstrbq_scatter_offset_u16(uint8_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16)))
+void __arm_vstrbq_scatter_offset(uint8_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32)))
+void __arm_vstrbq_scatter_offset_u32(uint8_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32)))
+void __arm_vstrbq_scatter_offset(uint8_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8)))
+void __arm_vstrbq_scatter_offset_u8(uint8_t *, uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8)))
+void __arm_vstrbq_scatter_offset(uint8_t *, uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u16)))
+void __arm_vstrbq_u16(uint8_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u16)))
+void __arm_vstrbq(uint8_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u32)))
+void __arm_vstrbq_u32(uint8_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u32)))
+void __arm_vstrbq(uint8_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u8)))
+void __arm_vstrbq_u8(uint8_t *, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u8)))
+void __arm_vstrbq(uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64)))
+void __arm_vstrdq_scatter_base_p_s64(uint64x2_t, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64)))
+void __arm_vstrdq_scatter_base_p(uint64x2_t, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64)))
+void __arm_vstrdq_scatter_base_p_u64(uint64x2_t, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64)))
+void __arm_vstrdq_scatter_base_p(uint64x2_t, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_s64)))
+void __arm_vstrdq_scatter_base_s64(uint64x2_t, int, int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_s64)))
+void __arm_vstrdq_scatter_base(uint64x2_t, int, int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_u64)))
+void __arm_vstrdq_scatter_base_u64(uint64x2_t, int, uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_u64)))
+void __arm_vstrdq_scatter_base(uint64x2_t, int, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64)))
+void __arm_vstrdq_scatter_base_wb_p_s64(uint64x2_t *, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64)))
+void __arm_vstrdq_scatter_base_wb_p(uint64x2_t *, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64)))
+void __arm_vstrdq_scatter_base_wb_p_u64(uint64x2_t *, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64)))
+void __arm_vstrdq_scatter_base_wb_p(uint64x2_t *, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64)))
+void __arm_vstrdq_scatter_base_wb_s64(uint64x2_t *, int, int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64)))
+void __arm_vstrdq_scatter_base_wb(uint64x2_t *, int, int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64)))
+void __arm_vstrdq_scatter_base_wb_u64(uint64x2_t *, int, uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64)))
+void __arm_vstrdq_scatter_base_wb(uint64x2_t *, int, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64)))
+void __arm_vstrdq_scatter_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64)))
+void __arm_vstrdq_scatter_offset_p(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64)))
+void __arm_vstrdq_scatter_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64)))
+void __arm_vstrdq_scatter_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64)))
+void __arm_vstrdq_scatter_offset_s64(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64)))
+void __arm_vstrdq_scatter_offset(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64)))
+void __arm_vstrdq_scatter_offset_u64(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64)))
+void __arm_vstrdq_scatter_offset(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64)))
+void __arm_vstrdq_scatter_shifted_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64)))
+void __arm_vstrdq_scatter_shifted_offset_p(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64)))
+void __arm_vstrdq_scatter_shifted_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64)))
+void __arm_vstrdq_scatter_shifted_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64)))
+void __arm_vstrdq_scatter_shifted_offset_s64(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64)))
+void __arm_vstrdq_scatter_shifted_offset(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64)))
+void __arm_vstrdq_scatter_shifted_offset_u64(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64)))
+void __arm_vstrdq_scatter_shifted_offset(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_s16)))
+void __arm_vstrhq_p_s16(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_s16)))
+void __arm_vstrhq_p(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_s32)))
+void __arm_vstrhq_p_s32(int16_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_s32)))
+void __arm_vstrhq_p(int16_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_u16)))
+void __arm_vstrhq_p_u16(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_u16)))
+void __arm_vstrhq_p(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_u32)))
+void __arm_vstrhq_p_u32(uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_u32)))
+void __arm_vstrhq_p(uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_s16)))
+void __arm_vstrhq_s16(int16_t *, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_s16)))
+void __arm_vstrhq(int16_t *, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_s32)))
+void __arm_vstrhq_s32(int16_t *, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_s32)))
+void __arm_vstrhq(int16_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16)))
+void __arm_vstrhq_scatter_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16)))
+void __arm_vstrhq_scatter_offset_p(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32)))
+void __arm_vstrhq_scatter_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32)))
+void __arm_vstrhq_scatter_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16)))
+void __arm_vstrhq_scatter_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16)))
+void __arm_vstrhq_scatter_offset_p(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32)))
+void __arm_vstrhq_scatter_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32)))
+void __arm_vstrhq_scatter_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16)))
+void __arm_vstrhq_scatter_offset_s16(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16)))
+void __arm_vstrhq_scatter_offset(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32)))
+void __arm_vstrhq_scatter_offset_s32(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32)))
+void __arm_vstrhq_scatter_offset(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16)))
+void __arm_vstrhq_scatter_offset_u16(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16)))
+void __arm_vstrhq_scatter_offset(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32)))
+void __arm_vstrhq_scatter_offset_u32(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32)))
+void __arm_vstrhq_scatter_offset(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16)))
+void __arm_vstrhq_scatter_shifted_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16)))
+void __arm_vstrhq_scatter_shifted_offset_p(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32)))
+void __arm_vstrhq_scatter_shifted_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32)))
+void __arm_vstrhq_scatter_shifted_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16)))
+void __arm_vstrhq_scatter_shifted_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16)))
+void __arm_vstrhq_scatter_shifted_offset_p(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32)))
+void __arm_vstrhq_scatter_shifted_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32)))
+void __arm_vstrhq_scatter_shifted_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16)))
+void __arm_vstrhq_scatter_shifted_offset_s16(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16)))
+void __arm_vstrhq_scatter_shifted_offset(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32)))
+void __arm_vstrhq_scatter_shifted_offset_s32(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32)))
+void __arm_vstrhq_scatter_shifted_offset(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16)))
+void __arm_vstrhq_scatter_shifted_offset_u16(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16)))
+void __arm_vstrhq_scatter_shifted_offset(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32)))
+void __arm_vstrhq_scatter_shifted_offset_u32(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32)))
+void __arm_vstrhq_scatter_shifted_offset(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_u16)))
+void __arm_vstrhq_u16(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_u16)))
+void __arm_vstrhq(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_u32)))
+void __arm_vstrhq_u32(uint16_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_u32)))
+void __arm_vstrhq(uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_s32)))
+void __arm_vstrwq_p_s32(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_s32)))
+void __arm_vstrwq_p(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_u32)))
+void __arm_vstrwq_p_u32(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_u32)))
+void __arm_vstrwq_p(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_s32)))
+void __arm_vstrwq_s32(int32_t *, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_s32)))
+void __arm_vstrwq(int32_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32)))
+void __arm_vstrwq_scatter_base_p_s32(uint32x4_t, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32)))
+void __arm_vstrwq_scatter_base_p(uint32x4_t, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32)))
+void __arm_vstrwq_scatter_base_p_u32(uint32x4_t, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32)))
+void __arm_vstrwq_scatter_base_p(uint32x4_t, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_s32)))
+void __arm_vstrwq_scatter_base_s32(uint32x4_t, int, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_s32)))
+void __arm_vstrwq_scatter_base(uint32x4_t, int, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_u32)))
+void __arm_vstrwq_scatter_base_u32(uint32x4_t, int, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_u32)))
+void __arm_vstrwq_scatter_base(uint32x4_t, int, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32)))
+void __arm_vstrwq_scatter_base_wb_p_s32(uint32x4_t *, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32)))
+void __arm_vstrwq_scatter_base_wb_p(uint32x4_t *, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32)))
+void __arm_vstrwq_scatter_base_wb_p_u32(uint32x4_t *, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32)))
+void __arm_vstrwq_scatter_base_wb_p(uint32x4_t *, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32)))
+void __arm_vstrwq_scatter_base_wb_s32(uint32x4_t *, int, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32)))
+void __arm_vstrwq_scatter_base_wb(uint32x4_t *, int, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32)))
+void __arm_vstrwq_scatter_base_wb_u32(uint32x4_t *, int, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32)))
+void __arm_vstrwq_scatter_base_wb(uint32x4_t *, int, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32)))
+void __arm_vstrwq_scatter_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32)))
+void __arm_vstrwq_scatter_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32)))
+void __arm_vstrwq_scatter_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32)))
+void __arm_vstrwq_scatter_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32)))
+void __arm_vstrwq_scatter_offset_s32(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32)))
+void __arm_vstrwq_scatter_offset(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32)))
+void __arm_vstrwq_scatter_offset_u32(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32)))
+void __arm_vstrwq_scatter_offset(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32)))
+void __arm_vstrwq_scatter_shifted_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32)))
+void __arm_vstrwq_scatter_shifted_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32)))
+void __arm_vstrwq_scatter_shifted_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32)))
+void __arm_vstrwq_scatter_shifted_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32)))
+void __arm_vstrwq_scatter_shifted_offset_s32(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32)))
+void __arm_vstrwq_scatter_shifted_offset(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32)))
+void __arm_vstrwq_scatter_shifted_offset_u32(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32)))
+void __arm_vstrwq_scatter_shifted_offset(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_u32)))
+void __arm_vstrwq_u32(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_u32)))
+void __arm_vstrwq(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s16)))
+int16x8_t __arm_vsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s16)))
+int16x8_t __arm_vsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s32)))
+int32x4_t __arm_vsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s32)))
+int32x4_t __arm_vsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s8)))
+int8x16_t __arm_vsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s8)))
+int8x16_t __arm_vsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u16)))
+uint16x8_t __arm_vsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u16)))
+uint16x8_t __arm_vsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u32)))
+uint32x4_t __arm_vsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u32)))
+uint32x4_t __arm_vsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u8)))
+uint8x16_t __arm_vsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u8)))
+uint8x16_t __arm_vsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_s16)))
+int16x8_t __arm_vsubq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_s16)))
+int16x8_t __arm_vsubq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_s32)))
+int32x4_t __arm_vsubq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_s32)))
+int32x4_t __arm_vsubq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_s8)))
+int8x16_t __arm_vsubq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_s8)))
+int8x16_t __arm_vsubq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_u16)))
+uint16x8_t __arm_vsubq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_u16)))
+uint16x8_t __arm_vsubq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_u32)))
+uint32x4_t __arm_vsubq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_u32)))
+uint32x4_t __arm_vsubq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_u8)))
+uint8x16_t __arm_vsubq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_u8)))
+uint8x16_t __arm_vsubq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s16)))
+int16x8_t __arm_vuninitializedq(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s32)))
+int32x4_t __arm_vuninitializedq(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s64)))
+int64x2_t __arm_vuninitializedq(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s8)))
+int8x16_t __arm_vuninitializedq(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u16)))
+uint16x8_t __arm_vuninitializedq(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u32)))
+uint32x4_t __arm_vuninitializedq(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u64)))
+uint64x2_t __arm_vuninitializedq(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u8)))
+uint8x16_t __arm_vuninitializedq(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_s16)))
+int16x8_t __arm_vuninitializedq_s16();
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_s32)))
+int32x4_t __arm_vuninitializedq_s32();
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_s64)))
+int64x2_t __arm_vuninitializedq_s64();
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_s8)))
+int8x16_t __arm_vuninitializedq_s8();
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_u16)))
+uint16x8_t __arm_vuninitializedq_u16();
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_u32)))
+uint32x4_t __arm_vuninitializedq_u32();
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_u64)))
+uint64x2_t __arm_vuninitializedq_u64();
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_u8)))
+uint8x16_t __arm_vuninitializedq_u8();
+
+#if (__ARM_FEATURE_MVE & 2)
+
+typedef __fp16 float16_t;
+typedef float float32_t;
+typedef __attribute__((neon_vector_type(8))) float16_t float16x8_t;
+typedef struct { float16x8_t val[2]; } float16x8x2_t;
+typedef struct { float16x8_t val[4]; } float16x8x4_t;
+typedef __attribute__((neon_vector_type(4))) float32_t float32x4_t;
+typedef struct { float32x4_t val[2]; } float32x4x2_t;
+typedef struct { float32x4_t val[4]; } float32x4x4_t;
+
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_f16)))
+float16x8_t __arm_vaddq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_f16)))
+float16x8_t __arm_vaddq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_f32)))
+float32x4_t __arm_vaddq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_f32)))
+float32x4_t __arm_vaddq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_f16)))
+float16x8_t __arm_vaddq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_f16)))
+float16x8_t __arm_vaddq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_f32)))
+float32x4_t __arm_vaddq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_f32)))
+float32x4_t __arm_vaddq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_f16)))
+mve_pred16_t __arm_vcmpeqq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_f16)))
+mve_pred16_t __arm_vcmpeqq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_f32)))
+mve_pred16_t __arm_vcmpeqq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_f32)))
+mve_pred16_t __arm_vcmpeqq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_f16)))
+mve_pred16_t __arm_vcmpeqq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_f16)))
+mve_pred16_t __arm_vcmpeqq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_f32)))
+mve_pred16_t __arm_vcmpeqq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_f32)))
+mve_pred16_t __arm_vcmpeqq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_f16)))
+mve_pred16_t __arm_vcmpeqq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_f16)))
+mve_pred16_t __arm_vcmpeqq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_f32)))
+mve_pred16_t __arm_vcmpeqq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_f32)))
+mve_pred16_t __arm_vcmpeqq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_f16)))
+mve_pred16_t __arm_vcmpeqq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_f16)))
+mve_pred16_t __arm_vcmpeqq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_f32)))
+mve_pred16_t __arm_vcmpeqq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_f32)))
+mve_pred16_t __arm_vcmpeqq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_f16)))
+mve_pred16_t __arm_vcmpgeq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_f16)))
+mve_pred16_t __arm_vcmpgeq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_f32)))
+mve_pred16_t __arm_vcmpgeq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_f32)))
+mve_pred16_t __arm_vcmpgeq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_f16)))
+mve_pred16_t __arm_vcmpgeq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_f16)))
+mve_pred16_t __arm_vcmpgeq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_f32)))
+mve_pred16_t __arm_vcmpgeq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_f32)))
+mve_pred16_t __arm_vcmpgeq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_f16)))
+mve_pred16_t __arm_vcmpgeq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_f16)))
+mve_pred16_t __arm_vcmpgeq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_f32)))
+mve_pred16_t __arm_vcmpgeq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_f32)))
+mve_pred16_t __arm_vcmpgeq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_f16)))
+mve_pred16_t __arm_vcmpgeq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_f16)))
+mve_pred16_t __arm_vcmpgeq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_f32)))
+mve_pred16_t __arm_vcmpgeq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_f32)))
+mve_pred16_t __arm_vcmpgeq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_f16)))
+mve_pred16_t __arm_vcmpgtq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_f16)))
+mve_pred16_t __arm_vcmpgtq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_f32)))
+mve_pred16_t __arm_vcmpgtq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_f32)))
+mve_pred16_t __arm_vcmpgtq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_f16)))
+mve_pred16_t __arm_vcmpgtq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_f16)))
+mve_pred16_t __arm_vcmpgtq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_f32)))
+mve_pred16_t __arm_vcmpgtq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_f32)))
+mve_pred16_t __arm_vcmpgtq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_f16)))
+mve_pred16_t __arm_vcmpgtq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_f16)))
+mve_pred16_t __arm_vcmpgtq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_f32)))
+mve_pred16_t __arm_vcmpgtq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_f32)))
+mve_pred16_t __arm_vcmpgtq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_f16)))
+mve_pred16_t __arm_vcmpgtq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_f16)))
+mve_pred16_t __arm_vcmpgtq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_f32)))
+mve_pred16_t __arm_vcmpgtq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_f32)))
+mve_pred16_t __arm_vcmpgtq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_f16)))
+mve_pred16_t __arm_vcmpleq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_f16)))
+mve_pred16_t __arm_vcmpleq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_f32)))
+mve_pred16_t __arm_vcmpleq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_f32)))
+mve_pred16_t __arm_vcmpleq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_f16)))
+mve_pred16_t __arm_vcmpleq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_f16)))
+mve_pred16_t __arm_vcmpleq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_f32)))
+mve_pred16_t __arm_vcmpleq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_f32)))
+mve_pred16_t __arm_vcmpleq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_f16)))
+mve_pred16_t __arm_vcmpleq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_f16)))
+mve_pred16_t __arm_vcmpleq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_f32)))
+mve_pred16_t __arm_vcmpleq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_f32)))
+mve_pred16_t __arm_vcmpleq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_f16)))
+mve_pred16_t __arm_vcmpleq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_f16)))
+mve_pred16_t __arm_vcmpleq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_f32)))
+mve_pred16_t __arm_vcmpleq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_f32)))
+mve_pred16_t __arm_vcmpleq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_f16)))
+mve_pred16_t __arm_vcmpltq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_f16)))
+mve_pred16_t __arm_vcmpltq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_f32)))
+mve_pred16_t __arm_vcmpltq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_f32)))
+mve_pred16_t __arm_vcmpltq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_f16)))
+mve_pred16_t __arm_vcmpltq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_f16)))
+mve_pred16_t __arm_vcmpltq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_f32)))
+mve_pred16_t __arm_vcmpltq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_f32)))
+mve_pred16_t __arm_vcmpltq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_f16)))
+mve_pred16_t __arm_vcmpltq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_f16)))
+mve_pred16_t __arm_vcmpltq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_f32)))
+mve_pred16_t __arm_vcmpltq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_f32)))
+mve_pred16_t __arm_vcmpltq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_f16)))
+mve_pred16_t __arm_vcmpltq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_f16)))
+mve_pred16_t __arm_vcmpltq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_f32)))
+mve_pred16_t __arm_vcmpltq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_f32)))
+mve_pred16_t __arm_vcmpltq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_f16)))
+mve_pred16_t __arm_vcmpneq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_f16)))
+mve_pred16_t __arm_vcmpneq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_f32)))
+mve_pred16_t __arm_vcmpneq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_f32)))
+mve_pred16_t __arm_vcmpneq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_f16)))
+mve_pred16_t __arm_vcmpneq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_f16)))
+mve_pred16_t __arm_vcmpneq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_f32)))
+mve_pred16_t __arm_vcmpneq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_f32)))
+mve_pred16_t __arm_vcmpneq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_f16)))
+mve_pred16_t __arm_vcmpneq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_f16)))
+mve_pred16_t __arm_vcmpneq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_f32)))
+mve_pred16_t __arm_vcmpneq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_f32)))
+mve_pred16_t __arm_vcmpneq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_f16)))
+mve_pred16_t __arm_vcmpneq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_f16)))
+mve_pred16_t __arm_vcmpneq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_f32)))
+mve_pred16_t __arm_vcmpneq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_f32)))
+mve_pred16_t __arm_vcmpneq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_f16)))
+float16x8_t __arm_vcreateq_f16(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_f32)))
+float32x4_t __arm_vcreateq_f32(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtbq_f16_f32)))
+float16x8_t __arm_vcvtbq_f16_f32(float16x8_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtbq_m_f16_f32)))
+float16x8_t __arm_vcvtbq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvttq_f16_f32)))
+float16x8_t __arm_vcvttq_f16_f32(float16x8_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvttq_m_f16_f32)))
+float16x8_t __arm_vcvttq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_f16)))
+float16_t __arm_vgetq_lane_f16(float16x8_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_f16)))
+float16_t __arm_vgetq_lane(float16x8_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_f32)))
+float32_t __arm_vgetq_lane_f32(float32x4_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_f32)))
+float32_t __arm_vgetq_lane(float32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_f16)))
+float16x8_t __arm_vld1q_f16(const float16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_f16)))
+float16x8_t __arm_vld1q(const float16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_f32)))
+float32x4_t __arm_vld1q_f32(const float32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_f32)))
+float32x4_t __arm_vld1q(const float32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_f16)))
+float16x8_t __arm_vld1q_z_f16(const float16_t *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_f16)))
+float16x8_t __arm_vld1q_z(const float16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_f32)))
+float32x4_t __arm_vld1q_z_f32(const float32_t *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_f32)))
+float32x4_t __arm_vld1q_z(const float32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_f16)))
+float16x8x2_t __arm_vld2q_f16(const float16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_f16)))
+float16x8x2_t __arm_vld2q(const float16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_f32)))
+float32x4x2_t __arm_vld2q_f32(const float32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_f32)))
+float32x4x2_t __arm_vld2q(const float32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_f16)))
+float16x8x4_t __arm_vld4q_f16(const float16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_f16)))
+float16x8x4_t __arm_vld4q(const float16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_f32)))
+float32x4x4_t __arm_vld4q_f32(const float32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_f32)))
+float32x4x4_t __arm_vld4q(const float32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_f16)))
+float16x8_t __arm_vldrhq_f16(const float16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_f16)))
+float16x8_t __arm_vldrhq_gather_offset_f16(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_f16)))
+float16x8_t __arm_vldrhq_gather_offset(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16)))
+float16x8_t __arm_vldrhq_gather_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16)))
+float16x8_t __arm_vldrhq_gather_offset_z(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16)))
+float16x8_t __arm_vldrhq_gather_shifted_offset_f16(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16)))
+float16x8_t __arm_vldrhq_gather_shifted_offset(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16)))
+float16x8_t __arm_vldrhq_gather_shifted_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16)))
+float16x8_t __arm_vldrhq_gather_shifted_offset_z(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_f16)))
+float16x8_t __arm_vldrhq_z_f16(const float16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_f32)))
+float32x4_t __arm_vldrwq_f32(const float32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_f32)))
+float32x4_t __arm_vldrwq_gather_base_f32(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_f32)))
+float32x4_t __arm_vldrwq_gather_base_wb_f32(uint32x4_t *, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_f32)))
+float32x4_t __arm_vldrwq_gather_base_wb_z_f32(uint32x4_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_z_f32)))
+float32x4_t __arm_vldrwq_gather_base_z_f32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_f32)))
+float32x4_t __arm_vldrwq_gather_offset_f32(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_f32)))
+float32x4_t __arm_vldrwq_gather_offset(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32)))
+float32x4_t __arm_vldrwq_gather_offset_z_f32(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32)))
+float32x4_t __arm_vldrwq_gather_offset_z(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32)))
+float32x4_t __arm_vldrwq_gather_shifted_offset_f32(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32)))
+float32x4_t __arm_vldrwq_gather_shifted_offset(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32)))
+float32x4_t __arm_vldrwq_gather_shifted_offset_z_f32(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32)))
+float32x4_t __arm_vldrwq_gather_shifted_offset_z(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_z_f32)))
+float32x4_t __arm_vldrwq_z_f32(const float32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_f32)))
+float16x8_t __arm_vreinterpretq_f16_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_f32)))
+float16x8_t __arm_vreinterpretq_f16(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s16)))
+float16x8_t __arm_vreinterpretq_f16_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s16)))
+float16x8_t __arm_vreinterpretq_f16(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s32)))
+float16x8_t __arm_vreinterpretq_f16_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s32)))
+float16x8_t __arm_vreinterpretq_f16(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s64)))
+float16x8_t __arm_vreinterpretq_f16_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s64)))
+float16x8_t __arm_vreinterpretq_f16(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s8)))
+float16x8_t __arm_vreinterpretq_f16_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s8)))
+float16x8_t __arm_vreinterpretq_f16(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u16)))
+float16x8_t __arm_vreinterpretq_f16_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u16)))
+float16x8_t __arm_vreinterpretq_f16(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u32)))
+float16x8_t __arm_vreinterpretq_f16_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u32)))
+float16x8_t __arm_vreinterpretq_f16(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u64)))
+float16x8_t __arm_vreinterpretq_f16_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u64)))
+float16x8_t __arm_vreinterpretq_f16(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u8)))
+float16x8_t __arm_vreinterpretq_f16_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u8)))
+float16x8_t __arm_vreinterpretq_f16(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_f16)))
+float32x4_t __arm_vreinterpretq_f32_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_f16)))
+float32x4_t __arm_vreinterpretq_f32(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s16)))
+float32x4_t __arm_vreinterpretq_f32_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s16)))
+float32x4_t __arm_vreinterpretq_f32(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s32)))
+float32x4_t __arm_vreinterpretq_f32_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s32)))
+float32x4_t __arm_vreinterpretq_f32(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s64)))
+float32x4_t __arm_vreinterpretq_f32_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s64)))
+float32x4_t __arm_vreinterpretq_f32(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s8)))
+float32x4_t __arm_vreinterpretq_f32_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s8)))
+float32x4_t __arm_vreinterpretq_f32(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u16)))
+float32x4_t __arm_vreinterpretq_f32_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u16)))
+float32x4_t __arm_vreinterpretq_f32(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u32)))
+float32x4_t __arm_vreinterpretq_f32_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u32)))
+float32x4_t __arm_vreinterpretq_f32(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u64)))
+float32x4_t __arm_vreinterpretq_f32_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u64)))
+float32x4_t __arm_vreinterpretq_f32(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u8)))
+float32x4_t __arm_vreinterpretq_f32_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u8)))
+float32x4_t __arm_vreinterpretq_f32(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_f16)))
+int16x8_t __arm_vreinterpretq_s16_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_f16)))
+int16x8_t __arm_vreinterpretq_s16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_f32)))
+int16x8_t __arm_vreinterpretq_s16_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_f32)))
+int16x8_t __arm_vreinterpretq_s16(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_f16)))
+int32x4_t __arm_vreinterpretq_s32_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_f16)))
+int32x4_t __arm_vreinterpretq_s32(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_f32)))
+int32x4_t __arm_vreinterpretq_s32_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_f32)))
+int32x4_t __arm_vreinterpretq_s32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_f16)))
+int64x2_t __arm_vreinterpretq_s64_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_f16)))
+int64x2_t __arm_vreinterpretq_s64(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_f32)))
+int64x2_t __arm_vreinterpretq_s64_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_f32)))
+int64x2_t __arm_vreinterpretq_s64(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_f16)))
+int8x16_t __arm_vreinterpretq_s8_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_f16)))
+int8x16_t __arm_vreinterpretq_s8(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_f32)))
+int8x16_t __arm_vreinterpretq_s8_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_f32)))
+int8x16_t __arm_vreinterpretq_s8(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_f16)))
+uint16x8_t __arm_vreinterpretq_u16_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_f16)))
+uint16x8_t __arm_vreinterpretq_u16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_f32)))
+uint16x8_t __arm_vreinterpretq_u16_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_f32)))
+uint16x8_t __arm_vreinterpretq_u16(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_f16)))
+uint32x4_t __arm_vreinterpretq_u32_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_f16)))
+uint32x4_t __arm_vreinterpretq_u32(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_f32)))
+uint32x4_t __arm_vreinterpretq_u32_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_f32)))
+uint32x4_t __arm_vreinterpretq_u32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_f16)))
+uint64x2_t __arm_vreinterpretq_u64_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_f16)))
+uint64x2_t __arm_vreinterpretq_u64(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_f32)))
+uint64x2_t __arm_vreinterpretq_u64_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_f32)))
+uint64x2_t __arm_vreinterpretq_u64(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_f16)))
+uint8x16_t __arm_vreinterpretq_u8_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_f16)))
+uint8x16_t __arm_vreinterpretq_u8(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_f32)))
+uint8x16_t __arm_vreinterpretq_u8_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_f32)))
+uint8x16_t __arm_vreinterpretq_u8(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_f16)))
+float16x8_t __arm_vsetq_lane_f16(float16_t, float16x8_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_f16)))
+float16x8_t __arm_vsetq_lane(float16_t, float16x8_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_f32)))
+float32x4_t __arm_vsetq_lane_f32(float32_t, float32x4_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_f32)))
+float32x4_t __arm_vsetq_lane(float32_t, float32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_f16)))
+void __arm_vst1q_f16(float16_t *, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_f16)))
+void __arm_vst1q(float16_t *, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_f32)))
+void __arm_vst1q_f32(float32_t *, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_f32)))
+void __arm_vst1q(float32_t *, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_f16)))
+void __arm_vst1q_p_f16(float16_t *, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_f16)))
+void __arm_vst1q_p(float16_t *, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_f32)))
+void __arm_vst1q_p_f32(float32_t *, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_f32)))
+void __arm_vst1q_p(float32_t *, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_f16)))
+void __arm_vst2q_f16(float16_t *, float16x8x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_f16)))
+void __arm_vst2q(float16_t *, float16x8x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_f32)))
+void __arm_vst2q_f32(float32_t *, float32x4x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_f32)))
+void __arm_vst2q(float32_t *, float32x4x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_f16)))
+void __arm_vst4q_f16(float16_t *, float16x8x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_f16)))
+void __arm_vst4q(float16_t *, float16x8x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_f32)))
+void __arm_vst4q_f32(float32_t *, float32x4x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_f32)))
+void __arm_vst4q(float32_t *, float32x4x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_f16)))
+void __arm_vstrhq_f16(float16_t *, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_f16)))
+void __arm_vstrhq(float16_t *, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_f16)))
+void __arm_vstrhq_p_f16(float16_t *, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_f16)))
+void __arm_vstrhq_p(float16_t *, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16)))
+void __arm_vstrhq_scatter_offset_f16(float16_t *, uint16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16)))
+void __arm_vstrhq_scatter_offset(float16_t *, uint16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16)))
+void __arm_vstrhq_scatter_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16)))
+void __arm_vstrhq_scatter_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16)))
+void __arm_vstrhq_scatter_shifted_offset_f16(float16_t *, uint16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16)))
+void __arm_vstrhq_scatter_shifted_offset(float16_t *, uint16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16)))
+void __arm_vstrhq_scatter_shifted_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16)))
+void __arm_vstrhq_scatter_shifted_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_f32)))
+void __arm_vstrwq_f32(float32_t *, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_f32)))
+void __arm_vstrwq(float32_t *, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_f32)))
+void __arm_vstrwq_p_f32(float32_t *, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_f32)))
+void __arm_vstrwq_p(float32_t *, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_f32)))
+void __arm_vstrwq_scatter_base_f32(uint32x4_t, int, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_f32)))
+void __arm_vstrwq_scatter_base(uint32x4_t, int, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32)))
+void __arm_vstrwq_scatter_base_p_f32(uint32x4_t, int, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32)))
+void __arm_vstrwq_scatter_base_p(uint32x4_t, int, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32)))
+void __arm_vstrwq_scatter_base_wb_f32(uint32x4_t *, int, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32)))
+void __arm_vstrwq_scatter_base_wb(uint32x4_t *, int, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32)))
+void __arm_vstrwq_scatter_base_wb_p_f32(uint32x4_t *, int, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32)))
+void __arm_vstrwq_scatter_base_wb_p(uint32x4_t *, int, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32)))
+void __arm_vstrwq_scatter_offset_f32(float32_t *, uint32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32)))
+void __arm_vstrwq_scatter_offset(float32_t *, uint32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32)))
+void __arm_vstrwq_scatter_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32)))
+void __arm_vstrwq_scatter_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32)))
+void __arm_vstrwq_scatter_shifted_offset_f32(float32_t *, uint32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32)))
+void __arm_vstrwq_scatter_shifted_offset(float32_t *, uint32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32)))
+void __arm_vstrwq_scatter_shifted_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32)))
+void __arm_vstrwq_scatter_shifted_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_f16)))
+float16x8_t __arm_vsubq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_f16)))
+float16x8_t __arm_vsubq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_f32)))
+float32x4_t __arm_vsubq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_f32)))
+float32x4_t __arm_vsubq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_f16)))
+float16x8_t __arm_vsubq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_f16)))
+float16x8_t __arm_vsubq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_f32)))
+float32x4_t __arm_vsubq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_f32)))
+float32x4_t __arm_vsubq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_f16)))
+float16x8_t __arm_vuninitializedq_f16();
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_f32)))
+float32x4_t __arm_vuninitializedq_f32();
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f16)))
+float16x8_t __arm_vuninitializedq(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f32)))
+float32x4_t __arm_vuninitializedq(float32x4_t);
+
+#endif /* (__ARM_FEATURE_MVE & 2) */
+
+#if (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE)
+
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_asrl)))
+int64_t asrl(int64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_lsll)))
+uint64_t lsll(uint64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqrshr)))
+int32_t sqrshr(int32_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqrshrl)))
+int64_t sqrshrl(int64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqrshrl_sat48)))
+int64_t sqrshrl_sat48(int64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqshl)))
+int32_t sqshl(int32_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_sqshll)))
+int64_t sqshll(int64_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_srshr)))
+int32_t srshr(int32_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_srshrl)))
+int64_t srshrl(int64_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqrshl)))
+uint32_t uqrshl(uint32_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqrshll)))
+uint64_t uqrshll(uint64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqrshll_sat48)))
+uint64_t uqrshll_sat48(uint64_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqshl)))
+uint32_t uqshl(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_uqshll)))
+uint64_t uqshll(uint64_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_urshr)))
+uint32_t urshr(uint32_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_urshrl)))
+uint64_t urshrl(uint64_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadciq_m_s32)))
+int32x4_t vadciq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadciq_m_s32)))
+int32x4_t vadciq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadciq_m_u32)))
+uint32x4_t vadciq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadciq_m_u32)))
+uint32x4_t vadciq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadciq_s32)))
+int32x4_t vadciq_s32(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadciq_s32)))
+int32x4_t vadciq(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadciq_u32)))
+uint32x4_t vadciq_u32(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadciq_u32)))
+uint32x4_t vadciq(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadcq_m_s32)))
+int32x4_t vadcq_m_s32(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadcq_m_s32)))
+int32x4_t vadcq_m(int32x4_t, int32x4_t, int32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadcq_m_u32)))
+uint32x4_t vadcq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadcq_m_u32)))
+uint32x4_t vadcq_m(uint32x4_t, uint32x4_t, uint32x4_t, unsigned *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadcq_s32)))
+int32x4_t vadcq_s32(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadcq_s32)))
+int32x4_t vadcq(int32x4_t, int32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vadcq_u32)))
+uint32x4_t vadcq_u32(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vadcq_u32)))
+uint32x4_t vadcq(uint32x4_t, uint32x4_t, unsigned *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s16)))
+int16x8_t vaddq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s16)))
+int16x8_t vaddq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s32)))
+int32x4_t vaddq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s32)))
+int32x4_t vaddq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s8)))
+int8x16_t vaddq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_s8)))
+int8x16_t vaddq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u16)))
+uint16x8_t vaddq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u16)))
+uint16x8_t vaddq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u32)))
+uint32x4_t vaddq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u32)))
+uint32x4_t vaddq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u8)))
+uint8x16_t vaddq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_u8)))
+uint8x16_t vaddq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_s16)))
+int16x8_t vaddq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_s16)))
+int16x8_t vaddq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_s32)))
+int32x4_t vaddq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_s32)))
+int32x4_t vaddq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_s8)))
+int8x16_t vaddq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_s8)))
+int8x16_t vaddq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_u16)))
+uint16x8_t vaddq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_u16)))
+uint16x8_t vaddq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_u32)))
+uint32x4_t vaddq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_u32)))
+uint32x4_t vaddq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_u8)))
+uint8x16_t vaddq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_u8)))
+uint8x16_t vaddq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u16)))
+mve_pred16_t vcmpcsq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u16)))
+mve_pred16_t vcmpcsq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u32)))
+mve_pred16_t vcmpcsq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u32)))
+mve_pred16_t vcmpcsq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u8)))
+mve_pred16_t vcmpcsq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_n_u8)))
+mve_pred16_t vcmpcsq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u16)))
+mve_pred16_t vcmpcsq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u16)))
+mve_pred16_t vcmpcsq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u32)))
+mve_pred16_t vcmpcsq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u32)))
+mve_pred16_t vcmpcsq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u8)))
+mve_pred16_t vcmpcsq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_m_u8)))
+mve_pred16_t vcmpcsq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u16)))
+mve_pred16_t vcmpcsq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u16)))
+mve_pred16_t vcmpcsq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u32)))
+mve_pred16_t vcmpcsq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u32)))
+mve_pred16_t vcmpcsq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u8)))
+mve_pred16_t vcmpcsq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_n_u8)))
+mve_pred16_t vcmpcsq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u16)))
+mve_pred16_t vcmpcsq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u16)))
+mve_pred16_t vcmpcsq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u32)))
+mve_pred16_t vcmpcsq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u32)))
+mve_pred16_t vcmpcsq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u8)))
+mve_pred16_t vcmpcsq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpcsq_u8)))
+mve_pred16_t vcmpcsq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s16)))
+mve_pred16_t vcmpeqq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s16)))
+mve_pred16_t vcmpeqq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s32)))
+mve_pred16_t vcmpeqq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s32)))
+mve_pred16_t vcmpeqq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s8)))
+mve_pred16_t vcmpeqq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_s8)))
+mve_pred16_t vcmpeqq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u16)))
+mve_pred16_t vcmpeqq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u16)))
+mve_pred16_t vcmpeqq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u32)))
+mve_pred16_t vcmpeqq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u32)))
+mve_pred16_t vcmpeqq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u8)))
+mve_pred16_t vcmpeqq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_u8)))
+mve_pred16_t vcmpeqq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s16)))
+mve_pred16_t vcmpeqq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s16)))
+mve_pred16_t vcmpeqq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s32)))
+mve_pred16_t vcmpeqq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s32)))
+mve_pred16_t vcmpeqq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s8)))
+mve_pred16_t vcmpeqq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_s8)))
+mve_pred16_t vcmpeqq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u16)))
+mve_pred16_t vcmpeqq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u16)))
+mve_pred16_t vcmpeqq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u32)))
+mve_pred16_t vcmpeqq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u32)))
+mve_pred16_t vcmpeqq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u8)))
+mve_pred16_t vcmpeqq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_u8)))
+mve_pred16_t vcmpeqq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s16)))
+mve_pred16_t vcmpeqq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s16)))
+mve_pred16_t vcmpeqq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s32)))
+mve_pred16_t vcmpeqq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s32)))
+mve_pred16_t vcmpeqq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s8)))
+mve_pred16_t vcmpeqq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_s8)))
+mve_pred16_t vcmpeqq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u16)))
+mve_pred16_t vcmpeqq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u16)))
+mve_pred16_t vcmpeqq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u32)))
+mve_pred16_t vcmpeqq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u32)))
+mve_pred16_t vcmpeqq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u8)))
+mve_pred16_t vcmpeqq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_u8)))
+mve_pred16_t vcmpeqq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s16)))
+mve_pred16_t vcmpeqq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s16)))
+mve_pred16_t vcmpeqq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s32)))
+mve_pred16_t vcmpeqq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s32)))
+mve_pred16_t vcmpeqq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s8)))
+mve_pred16_t vcmpeqq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_s8)))
+mve_pred16_t vcmpeqq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u16)))
+mve_pred16_t vcmpeqq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u16)))
+mve_pred16_t vcmpeqq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u32)))
+mve_pred16_t vcmpeqq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u32)))
+mve_pred16_t vcmpeqq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u8)))
+mve_pred16_t vcmpeqq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_u8)))
+mve_pred16_t vcmpeqq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s16)))
+mve_pred16_t vcmpgeq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s16)))
+mve_pred16_t vcmpgeq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s32)))
+mve_pred16_t vcmpgeq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s32)))
+mve_pred16_t vcmpgeq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s8)))
+mve_pred16_t vcmpgeq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_s8)))
+mve_pred16_t vcmpgeq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s16)))
+mve_pred16_t vcmpgeq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s16)))
+mve_pred16_t vcmpgeq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s32)))
+mve_pred16_t vcmpgeq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s32)))
+mve_pred16_t vcmpgeq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s8)))
+mve_pred16_t vcmpgeq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_s8)))
+mve_pred16_t vcmpgeq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s16)))
+mve_pred16_t vcmpgeq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s16)))
+mve_pred16_t vcmpgeq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s32)))
+mve_pred16_t vcmpgeq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s32)))
+mve_pred16_t vcmpgeq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s8)))
+mve_pred16_t vcmpgeq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_s8)))
+mve_pred16_t vcmpgeq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s16)))
+mve_pred16_t vcmpgeq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s16)))
+mve_pred16_t vcmpgeq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s32)))
+mve_pred16_t vcmpgeq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s32)))
+mve_pred16_t vcmpgeq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s8)))
+mve_pred16_t vcmpgeq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_s8)))
+mve_pred16_t vcmpgeq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s16)))
+mve_pred16_t vcmpgtq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s16)))
+mve_pred16_t vcmpgtq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s32)))
+mve_pred16_t vcmpgtq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s32)))
+mve_pred16_t vcmpgtq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s8)))
+mve_pred16_t vcmpgtq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_s8)))
+mve_pred16_t vcmpgtq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s16)))
+mve_pred16_t vcmpgtq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s16)))
+mve_pred16_t vcmpgtq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s32)))
+mve_pred16_t vcmpgtq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s32)))
+mve_pred16_t vcmpgtq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s8)))
+mve_pred16_t vcmpgtq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_s8)))
+mve_pred16_t vcmpgtq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s16)))
+mve_pred16_t vcmpgtq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s16)))
+mve_pred16_t vcmpgtq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s32)))
+mve_pred16_t vcmpgtq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s32)))
+mve_pred16_t vcmpgtq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s8)))
+mve_pred16_t vcmpgtq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_s8)))
+mve_pred16_t vcmpgtq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s16)))
+mve_pred16_t vcmpgtq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s16)))
+mve_pred16_t vcmpgtq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s32)))
+mve_pred16_t vcmpgtq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s32)))
+mve_pred16_t vcmpgtq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s8)))
+mve_pred16_t vcmpgtq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_s8)))
+mve_pred16_t vcmpgtq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u16)))
+mve_pred16_t vcmphiq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u16)))
+mve_pred16_t vcmphiq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u32)))
+mve_pred16_t vcmphiq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u32)))
+mve_pred16_t vcmphiq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u8)))
+mve_pred16_t vcmphiq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_n_u8)))
+mve_pred16_t vcmphiq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u16)))
+mve_pred16_t vcmphiq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u16)))
+mve_pred16_t vcmphiq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u32)))
+mve_pred16_t vcmphiq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u32)))
+mve_pred16_t vcmphiq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u8)))
+mve_pred16_t vcmphiq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_m_u8)))
+mve_pred16_t vcmphiq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u16)))
+mve_pred16_t vcmphiq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u16)))
+mve_pred16_t vcmphiq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u32)))
+mve_pred16_t vcmphiq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u32)))
+mve_pred16_t vcmphiq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u8)))
+mve_pred16_t vcmphiq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_n_u8)))
+mve_pred16_t vcmphiq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u16)))
+mve_pred16_t vcmphiq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u16)))
+mve_pred16_t vcmphiq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u32)))
+mve_pred16_t vcmphiq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u32)))
+mve_pred16_t vcmphiq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u8)))
+mve_pred16_t vcmphiq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmphiq_u8)))
+mve_pred16_t vcmphiq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s16)))
+mve_pred16_t vcmpleq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s16)))
+mve_pred16_t vcmpleq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s32)))
+mve_pred16_t vcmpleq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s32)))
+mve_pred16_t vcmpleq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s8)))
+mve_pred16_t vcmpleq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_s8)))
+mve_pred16_t vcmpleq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s16)))
+mve_pred16_t vcmpleq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s16)))
+mve_pred16_t vcmpleq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s32)))
+mve_pred16_t vcmpleq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s32)))
+mve_pred16_t vcmpleq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s8)))
+mve_pred16_t vcmpleq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_s8)))
+mve_pred16_t vcmpleq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s16)))
+mve_pred16_t vcmpleq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s16)))
+mve_pred16_t vcmpleq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s32)))
+mve_pred16_t vcmpleq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s32)))
+mve_pred16_t vcmpleq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s8)))
+mve_pred16_t vcmpleq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_s8)))
+mve_pred16_t vcmpleq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s16)))
+mve_pred16_t vcmpleq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s16)))
+mve_pred16_t vcmpleq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s32)))
+mve_pred16_t vcmpleq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s32)))
+mve_pred16_t vcmpleq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s8)))
+mve_pred16_t vcmpleq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_s8)))
+mve_pred16_t vcmpleq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s16)))
+mve_pred16_t vcmpltq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s16)))
+mve_pred16_t vcmpltq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s32)))
+mve_pred16_t vcmpltq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s32)))
+mve_pred16_t vcmpltq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s8)))
+mve_pred16_t vcmpltq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_s8)))
+mve_pred16_t vcmpltq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s16)))
+mve_pred16_t vcmpltq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s16)))
+mve_pred16_t vcmpltq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s32)))
+mve_pred16_t vcmpltq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s32)))
+mve_pred16_t vcmpltq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s8)))
+mve_pred16_t vcmpltq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_s8)))
+mve_pred16_t vcmpltq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s16)))
+mve_pred16_t vcmpltq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s16)))
+mve_pred16_t vcmpltq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s32)))
+mve_pred16_t vcmpltq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s32)))
+mve_pred16_t vcmpltq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s8)))
+mve_pred16_t vcmpltq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_s8)))
+mve_pred16_t vcmpltq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s16)))
+mve_pred16_t vcmpltq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s16)))
+mve_pred16_t vcmpltq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s32)))
+mve_pred16_t vcmpltq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s32)))
+mve_pred16_t vcmpltq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s8)))
+mve_pred16_t vcmpltq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_s8)))
+mve_pred16_t vcmpltq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s16)))
+mve_pred16_t vcmpneq_m_n_s16(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s16)))
+mve_pred16_t vcmpneq_m(int16x8_t, int16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s32)))
+mve_pred16_t vcmpneq_m_n_s32(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s32)))
+mve_pred16_t vcmpneq_m(int32x4_t, int32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s8)))
+mve_pred16_t vcmpneq_m_n_s8(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_s8)))
+mve_pred16_t vcmpneq_m(int8x16_t, int8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u16)))
+mve_pred16_t vcmpneq_m_n_u16(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u16)))
+mve_pred16_t vcmpneq_m(uint16x8_t, uint16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u32)))
+mve_pred16_t vcmpneq_m_n_u32(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u32)))
+mve_pred16_t vcmpneq_m(uint32x4_t, uint32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u8)))
+mve_pred16_t vcmpneq_m_n_u8(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_u8)))
+mve_pred16_t vcmpneq_m(uint8x16_t, uint8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s16)))
+mve_pred16_t vcmpneq_m_s16(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s16)))
+mve_pred16_t vcmpneq_m(int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s32)))
+mve_pred16_t vcmpneq_m_s32(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s32)))
+mve_pred16_t vcmpneq_m(int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s8)))
+mve_pred16_t vcmpneq_m_s8(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_s8)))
+mve_pred16_t vcmpneq_m(int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u16)))
+mve_pred16_t vcmpneq_m_u16(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u16)))
+mve_pred16_t vcmpneq_m(uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u32)))
+mve_pred16_t vcmpneq_m_u32(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u32)))
+mve_pred16_t vcmpneq_m(uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u8)))
+mve_pred16_t vcmpneq_m_u8(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_u8)))
+mve_pred16_t vcmpneq_m(uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s16)))
+mve_pred16_t vcmpneq_n_s16(int16x8_t, int16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s16)))
+mve_pred16_t vcmpneq(int16x8_t, int16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s32)))
+mve_pred16_t vcmpneq_n_s32(int32x4_t, int32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s32)))
+mve_pred16_t vcmpneq(int32x4_t, int32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s8)))
+mve_pred16_t vcmpneq_n_s8(int8x16_t, int8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_s8)))
+mve_pred16_t vcmpneq(int8x16_t, int8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u16)))
+mve_pred16_t vcmpneq_n_u16(uint16x8_t, uint16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u16)))
+mve_pred16_t vcmpneq(uint16x8_t, uint16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u32)))
+mve_pred16_t vcmpneq_n_u32(uint32x4_t, uint32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u32)))
+mve_pred16_t vcmpneq(uint32x4_t, uint32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u8)))
+mve_pred16_t vcmpneq_n_u8(uint8x16_t, uint8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_u8)))
+mve_pred16_t vcmpneq(uint8x16_t, uint8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s16)))
+mve_pred16_t vcmpneq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s16)))
+mve_pred16_t vcmpneq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s32)))
+mve_pred16_t vcmpneq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s32)))
+mve_pred16_t vcmpneq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s8)))
+mve_pred16_t vcmpneq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_s8)))
+mve_pred16_t vcmpneq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u16)))
+mve_pred16_t vcmpneq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u16)))
+mve_pred16_t vcmpneq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u32)))
+mve_pred16_t vcmpneq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u32)))
+mve_pred16_t vcmpneq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u8)))
+mve_pred16_t vcmpneq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_u8)))
+mve_pred16_t vcmpneq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_s16)))
+int16x8_t vcreateq_s16(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_s32)))
+int32x4_t vcreateq_s32(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_s64)))
+int64x2_t vcreateq_s64(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_s8)))
+int8x16_t vcreateq_s8(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_u16)))
+uint16x8_t vcreateq_u16(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_u32)))
+uint32x4_t vcreateq_u32(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_u64)))
+uint64x2_t vcreateq_u64(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_u8)))
+uint8x16_t vcreateq_u8(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s16)))
+int16_t vgetq_lane_s16(int16x8_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s16)))
+int16_t vgetq_lane(int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s32)))
+int32_t vgetq_lane_s32(int32x4_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s32)))
+int32_t vgetq_lane(int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s64)))
+int64_t vgetq_lane_s64(int64x2_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s64)))
+int64_t vgetq_lane(int64x2_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s8)))
+int8_t vgetq_lane_s8(int8x16_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_s8)))
+int8_t vgetq_lane(int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u16)))
+uint16_t vgetq_lane_u16(uint16x8_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u16)))
+uint16_t vgetq_lane(uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u32)))
+uint32_t vgetq_lane_u32(uint32x4_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u32)))
+uint32_t vgetq_lane(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u64)))
+uint64_t vgetq_lane_u64(uint64x2_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u64)))
+uint64_t vgetq_lane(uint64x2_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u8)))
+uint8_t vgetq_lane_u8(uint8x16_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_u8)))
+uint8_t vgetq_lane(uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_s16)))
+int16x8_t vld1q_s16(const int16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_s16)))
+int16x8_t vld1q(const int16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_s32)))
+int32x4_t vld1q_s32(const int32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_s32)))
+int32x4_t vld1q(const int32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_s8)))
+int8x16_t vld1q_s8(const int8_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_s8)))
+int8x16_t vld1q(const int8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_u16)))
+uint16x8_t vld1q_u16(const uint16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_u16)))
+uint16x8_t vld1q(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_u32)))
+uint32x4_t vld1q_u32(const uint32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_u32)))
+uint32x4_t vld1q(const uint32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_u8)))
+uint8x16_t vld1q_u8(const uint8_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_u8)))
+uint8x16_t vld1q(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s16)))
+int16x8_t vld1q_z_s16(const int16_t *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s16)))
+int16x8_t vld1q_z(const int16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s32)))
+int32x4_t vld1q_z_s32(const int32_t *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s32)))
+int32x4_t vld1q_z(const int32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s8)))
+int8x16_t vld1q_z_s8(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_s8)))
+int8x16_t vld1q_z(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u16)))
+uint16x8_t vld1q_z_u16(const uint16_t *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u16)))
+uint16x8_t vld1q_z(const uint16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u32)))
+uint32x4_t vld1q_z_u32(const uint32_t *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u32)))
+uint32x4_t vld1q_z(const uint32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u8)))
+uint8x16_t vld1q_z_u8(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_u8)))
+uint8x16_t vld1q_z(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_s16)))
+int16x8x2_t vld2q_s16(const int16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_s16)))
+int16x8x2_t vld2q(const int16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_s32)))
+int32x4x2_t vld2q_s32(const int32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_s32)))
+int32x4x2_t vld2q(const int32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_s8)))
+int8x16x2_t vld2q_s8(const int8_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_s8)))
+int8x16x2_t vld2q(const int8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_u16)))
+uint16x8x2_t vld2q_u16(const uint16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_u16)))
+uint16x8x2_t vld2q(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_u32)))
+uint32x4x2_t vld2q_u32(const uint32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_u32)))
+uint32x4x2_t vld2q(const uint32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_u8)))
+uint8x16x2_t vld2q_u8(const uint8_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_u8)))
+uint8x16x2_t vld2q(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_s16)))
+int16x8x4_t vld4q_s16(const int16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_s16)))
+int16x8x4_t vld4q(const int16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_s32)))
+int32x4x4_t vld4q_s32(const int32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_s32)))
+int32x4x4_t vld4q(const int32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_s8)))
+int8x16x4_t vld4q_s8(const int8_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_s8)))
+int8x16x4_t vld4q(const int8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_u16)))
+uint16x8x4_t vld4q_u16(const uint16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_u16)))
+uint16x8x4_t vld4q(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_u32)))
+uint32x4x4_t vld4q_u32(const uint32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_u32)))
+uint32x4x4_t vld4q(const uint32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_u8)))
+uint8x16x4_t vld4q_u8(const uint8_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_u8)))
+uint8x16x4_t vld4q(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s16)))
+int16x8_t vldrbq_gather_offset_s16(const int8_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s16)))
+int16x8_t vldrbq_gather_offset(const int8_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s32)))
+int32x4_t vldrbq_gather_offset_s32(const int8_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s32)))
+int32x4_t vldrbq_gather_offset(const int8_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s8)))
+int8x16_t vldrbq_gather_offset_s8(const int8_t *, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_s8)))
+int8x16_t vldrbq_gather_offset(const int8_t *, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u16)))
+uint16x8_t vldrbq_gather_offset_u16(const uint8_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u16)))
+uint16x8_t vldrbq_gather_offset(const uint8_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u32)))
+uint32x4_t vldrbq_gather_offset_u32(const uint8_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u32)))
+uint32x4_t vldrbq_gather_offset(const uint8_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u8)))
+uint8x16_t vldrbq_gather_offset_u8(const uint8_t *, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_u8)))
+uint8x16_t vldrbq_gather_offset(const uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16)))
+int16x8_t vldrbq_gather_offset_z_s16(const int8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s16)))
+int16x8_t vldrbq_gather_offset_z(const int8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32)))
+int32x4_t vldrbq_gather_offset_z_s32(const int8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s32)))
+int32x4_t vldrbq_gather_offset_z(const int8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8)))
+int8x16_t vldrbq_gather_offset_z_s8(const int8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_s8)))
+int8x16_t vldrbq_gather_offset_z(const int8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16)))
+uint16x8_t vldrbq_gather_offset_z_u16(const uint8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u16)))
+uint16x8_t vldrbq_gather_offset_z(const uint8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32)))
+uint32x4_t vldrbq_gather_offset_z_u32(const uint8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u32)))
+uint32x4_t vldrbq_gather_offset_z(const uint8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8)))
+uint8x16_t vldrbq_gather_offset_z_u8(const uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrbq_gather_offset_z_u8)))
+uint8x16_t vldrbq_gather_offset_z(const uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_s16)))
+int16x8_t vldrbq_s16(const int8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_s32)))
+int32x4_t vldrbq_s32(const int8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_s8)))
+int8x16_t vldrbq_s8(const int8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_u16)))
+uint16x8_t vldrbq_u16(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_u32)))
+uint32x4_t vldrbq_u32(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_u8)))
+uint8x16_t vldrbq_u8(const uint8_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_s16)))
+int16x8_t vldrbq_z_s16(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_s32)))
+int32x4_t vldrbq_z_s32(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_s8)))
+int8x16_t vldrbq_z_s8(const int8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_u16)))
+uint16x8_t vldrbq_z_u16(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_u32)))
+uint32x4_t vldrbq_z_u32(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrbq_z_u8)))
+uint8x16_t vldrbq_z_u8(const uint8_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_s64)))
+int64x2_t vldrdq_gather_base_s64(uint64x2_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_u64)))
+uint64x2_t vldrdq_gather_base_u64(uint64x2_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_wb_s64)))
+int64x2_t vldrdq_gather_base_wb_s64(uint64x2_t *, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_wb_u64)))
+uint64x2_t vldrdq_gather_base_wb_u64(uint64x2_t *, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_s64)))
+int64x2_t vldrdq_gather_base_wb_z_s64(uint64x2_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_wb_z_u64)))
+uint64x2_t vldrdq_gather_base_wb_z_u64(uint64x2_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_z_s64)))
+int64x2_t vldrdq_gather_base_z_s64(uint64x2_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_base_z_u64)))
+uint64x2_t vldrdq_gather_base_z_u64(uint64x2_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_s64)))
+int64x2_t vldrdq_gather_offset_s64(const int64_t *, uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_s64)))
+int64x2_t vldrdq_gather_offset(const int64_t *, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_u64)))
+uint64x2_t vldrdq_gather_offset_u64(const uint64_t *, uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_u64)))
+uint64x2_t vldrdq_gather_offset(const uint64_t *, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64)))
+int64x2_t vldrdq_gather_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_z_s64)))
+int64x2_t vldrdq_gather_offset_z(const int64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64)))
+uint64x2_t vldrdq_gather_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_offset_z_u64)))
+uint64x2_t vldrdq_gather_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64)))
+int64x2_t vldrdq_gather_shifted_offset_s64(const int64_t *, uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_s64)))
+int64x2_t vldrdq_gather_shifted_offset(const int64_t *, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64)))
+uint64x2_t vldrdq_gather_shifted_offset_u64(const uint64_t *, uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_u64)))
+uint64x2_t vldrdq_gather_shifted_offset(const uint64_t *, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64)))
+int64x2_t vldrdq_gather_shifted_offset_z_s64(const int64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_s64)))
+int64x2_t vldrdq_gather_shifted_offset_z(const int64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64)))
+uint64x2_t vldrdq_gather_shifted_offset_z_u64(const uint64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrdq_gather_shifted_offset_z_u64)))
+uint64x2_t vldrdq_gather_shifted_offset_z(const uint64_t *, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_s16)))
+int16x8_t vldrhq_gather_offset_s16(const int16_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_s16)))
+int16x8_t vldrhq_gather_offset(const int16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_s32)))
+int32x4_t vldrhq_gather_offset_s32(const int16_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_s32)))
+int32x4_t vldrhq_gather_offset(const int16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_u16)))
+uint16x8_t vldrhq_gather_offset_u16(const uint16_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_u16)))
+uint16x8_t vldrhq_gather_offset(const uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_u32)))
+uint32x4_t vldrhq_gather_offset_u32(const uint16_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_u32)))
+uint32x4_t vldrhq_gather_offset(const uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16)))
+int16x8_t vldrhq_gather_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s16)))
+int16x8_t vldrhq_gather_offset_z(const int16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32)))
+int32x4_t vldrhq_gather_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_s32)))
+int32x4_t vldrhq_gather_offset_z(const int16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16)))
+uint16x8_t vldrhq_gather_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u16)))
+uint16x8_t vldrhq_gather_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32)))
+uint32x4_t vldrhq_gather_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_u32)))
+uint32x4_t vldrhq_gather_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16)))
+int16x8_t vldrhq_gather_shifted_offset_s16(const int16_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s16)))
+int16x8_t vldrhq_gather_shifted_offset(const int16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32)))
+int32x4_t vldrhq_gather_shifted_offset_s32(const int16_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_s32)))
+int32x4_t vldrhq_gather_shifted_offset(const int16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16)))
+uint16x8_t vldrhq_gather_shifted_offset_u16(const uint16_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u16)))
+uint16x8_t vldrhq_gather_shifted_offset(const uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32)))
+uint32x4_t vldrhq_gather_shifted_offset_u32(const uint16_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_u32)))
+uint32x4_t vldrhq_gather_shifted_offset(const uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16)))
+int16x8_t vldrhq_gather_shifted_offset_z_s16(const int16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s16)))
+int16x8_t vldrhq_gather_shifted_offset_z(const int16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32)))
+int32x4_t vldrhq_gather_shifted_offset_z_s32(const int16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_s32)))
+int32x4_t vldrhq_gather_shifted_offset_z(const int16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16)))
+uint16x8_t vldrhq_gather_shifted_offset_z_u16(const uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u16)))
+uint16x8_t vldrhq_gather_shifted_offset_z(const uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32)))
+uint32x4_t vldrhq_gather_shifted_offset_z_u32(const uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_u32)))
+uint32x4_t vldrhq_gather_shifted_offset_z(const uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_s16)))
+int16x8_t vldrhq_s16(const int16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_s32)))
+int32x4_t vldrhq_s32(const int16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_u16)))
+uint16x8_t vldrhq_u16(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_u32)))
+uint32x4_t vldrhq_u32(const uint16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_s16)))
+int16x8_t vldrhq_z_s16(const int16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_s32)))
+int32x4_t vldrhq_z_s32(const int16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_u16)))
+uint16x8_t vldrhq_z_u16(const uint16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_u32)))
+uint32x4_t vldrhq_z_u32(const uint16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_s32)))
+int32x4_t vldrwq_gather_base_s32(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_u32)))
+uint32x4_t vldrwq_gather_base_u32(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_s32)))
+int32x4_t vldrwq_gather_base_wb_s32(uint32x4_t *, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_u32)))
+uint32x4_t vldrwq_gather_base_wb_u32(uint32x4_t *, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_s32)))
+int32x4_t vldrwq_gather_base_wb_z_s32(uint32x4_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_u32)))
+uint32x4_t vldrwq_gather_base_wb_z_u32(uint32x4_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_z_s32)))
+int32x4_t vldrwq_gather_base_z_s32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_z_u32)))
+uint32x4_t vldrwq_gather_base_z_u32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_s32)))
+int32x4_t vldrwq_gather_offset_s32(const int32_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_s32)))
+int32x4_t vldrwq_gather_offset(const int32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_u32)))
+uint32x4_t vldrwq_gather_offset_u32(const uint32_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_u32)))
+uint32x4_t vldrwq_gather_offset(const uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32)))
+int32x4_t vldrwq_gather_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_s32)))
+int32x4_t vldrwq_gather_offset_z(const int32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32)))
+uint32x4_t vldrwq_gather_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_u32)))
+uint32x4_t vldrwq_gather_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32)))
+int32x4_t vldrwq_gather_shifted_offset_s32(const int32_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_s32)))
+int32x4_t vldrwq_gather_shifted_offset(const int32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32)))
+uint32x4_t vldrwq_gather_shifted_offset_u32(const uint32_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_u32)))
+uint32x4_t vldrwq_gather_shifted_offset(const uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32)))
+int32x4_t vldrwq_gather_shifted_offset_z_s32(const int32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_s32)))
+int32x4_t vldrwq_gather_shifted_offset_z(const int32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32)))
+uint32x4_t vldrwq_gather_shifted_offset_z_u32(const uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_u32)))
+uint32x4_t vldrwq_gather_shifted_offset_z(const uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_s32)))
+int32x4_t vldrwq_s32(const int32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_u32)))
+uint32x4_t vldrwq_u32(const uint32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_z_s32)))
+int32x4_t vldrwq_z_s32(const int32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_z_u32)))
+uint32x4_t vldrwq_z_u32(const uint32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s16)))
+int16_t vmaxvq_s16(int16_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s16)))
+int16_t vmaxvq(int16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s32)))
+int32_t vmaxvq_s32(int32_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s32)))
+int32_t vmaxvq(int32_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s8)))
+int8_t vmaxvq_s8(int8_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_s8)))
+int8_t vmaxvq(int8_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u16)))
+uint16_t vmaxvq_u16(uint16_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u16)))
+uint16_t vmaxvq(uint16_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u32)))
+uint32_t vmaxvq_u32(uint32_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u32)))
+uint32_t vmaxvq(uint32_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u8)))
+uint8_t vmaxvq_u8(uint8_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vmaxvq_u8)))
+uint8_t vmaxvq(uint8_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_s16)))
+int16_t vminvq_s16(int16_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_s16)))
+int16_t vminvq(int16_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_s32)))
+int32_t vminvq_s32(int32_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_s32)))
+int32_t vminvq(int32_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_s8)))
+int8_t vminvq_s8(int8_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_s8)))
+int8_t vminvq(int8_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_u16)))
+uint16_t vminvq_u16(uint16_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_u16)))
+uint16_t vminvq(uint16_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_u32)))
+uint32_t vminvq_u32(uint32_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_u32)))
+uint32_t vminvq(uint32_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vminvq_u8)))
+uint8_t vminvq_u8(uint8_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vminvq_u8)))
+uint8_t vminvq(uint8_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s32)))
+int16x8_t vreinterpretq_s16_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s32)))
+int16x8_t vreinterpretq_s16(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s64)))
+int16x8_t vreinterpretq_s16_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s64)))
+int16x8_t vreinterpretq_s16(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s8)))
+int16x8_t vreinterpretq_s16_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_s8)))
+int16x8_t vreinterpretq_s16(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u16)))
+int16x8_t vreinterpretq_s16_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u16)))
+int16x8_t vreinterpretq_s16(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u32)))
+int16x8_t vreinterpretq_s16_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u32)))
+int16x8_t vreinterpretq_s16(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u64)))
+int16x8_t vreinterpretq_s16_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u64)))
+int16x8_t vreinterpretq_s16(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u8)))
+int16x8_t vreinterpretq_s16_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_u8)))
+int16x8_t vreinterpretq_s16(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s16)))
+int32x4_t vreinterpretq_s32_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s16)))
+int32x4_t vreinterpretq_s32(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s64)))
+int32x4_t vreinterpretq_s32_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s64)))
+int32x4_t vreinterpretq_s32(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s8)))
+int32x4_t vreinterpretq_s32_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_s8)))
+int32x4_t vreinterpretq_s32(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u16)))
+int32x4_t vreinterpretq_s32_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u16)))
+int32x4_t vreinterpretq_s32(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u32)))
+int32x4_t vreinterpretq_s32_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u32)))
+int32x4_t vreinterpretq_s32(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u64)))
+int32x4_t vreinterpretq_s32_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u64)))
+int32x4_t vreinterpretq_s32(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u8)))
+int32x4_t vreinterpretq_s32_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_u8)))
+int32x4_t vreinterpretq_s32(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s16)))
+int64x2_t vreinterpretq_s64_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s16)))
+int64x2_t vreinterpretq_s64(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s32)))
+int64x2_t vreinterpretq_s64_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s32)))
+int64x2_t vreinterpretq_s64(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s8)))
+int64x2_t vreinterpretq_s64_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_s8)))
+int64x2_t vreinterpretq_s64(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u16)))
+int64x2_t vreinterpretq_s64_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u16)))
+int64x2_t vreinterpretq_s64(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u32)))
+int64x2_t vreinterpretq_s64_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u32)))
+int64x2_t vreinterpretq_s64(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u64)))
+int64x2_t vreinterpretq_s64_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u64)))
+int64x2_t vreinterpretq_s64(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u8)))
+int64x2_t vreinterpretq_s64_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_u8)))
+int64x2_t vreinterpretq_s64(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s16)))
+int8x16_t vreinterpretq_s8_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s16)))
+int8x16_t vreinterpretq_s8(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s32)))
+int8x16_t vreinterpretq_s8_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s32)))
+int8x16_t vreinterpretq_s8(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s64)))
+int8x16_t vreinterpretq_s8_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_s64)))
+int8x16_t vreinterpretq_s8(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u16)))
+int8x16_t vreinterpretq_s8_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u16)))
+int8x16_t vreinterpretq_s8(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u32)))
+int8x16_t vreinterpretq_s8_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u32)))
+int8x16_t vreinterpretq_s8(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u64)))
+int8x16_t vreinterpretq_s8_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u64)))
+int8x16_t vreinterpretq_s8(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u8)))
+int8x16_t vreinterpretq_s8_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_u8)))
+int8x16_t vreinterpretq_s8(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s16)))
+uint16x8_t vreinterpretq_u16_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s16)))
+uint16x8_t vreinterpretq_u16(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s32)))
+uint16x8_t vreinterpretq_u16_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s32)))
+uint16x8_t vreinterpretq_u16(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s64)))
+uint16x8_t vreinterpretq_u16_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s64)))
+uint16x8_t vreinterpretq_u16(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s8)))
+uint16x8_t vreinterpretq_u16_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_s8)))
+uint16x8_t vreinterpretq_u16(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u32)))
+uint16x8_t vreinterpretq_u16_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u32)))
+uint16x8_t vreinterpretq_u16(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u64)))
+uint16x8_t vreinterpretq_u16_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u64)))
+uint16x8_t vreinterpretq_u16(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u8)))
+uint16x8_t vreinterpretq_u16_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_u8)))
+uint16x8_t vreinterpretq_u16(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s16)))
+uint32x4_t vreinterpretq_u32_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s16)))
+uint32x4_t vreinterpretq_u32(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s32)))
+uint32x4_t vreinterpretq_u32_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s32)))
+uint32x4_t vreinterpretq_u32(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s64)))
+uint32x4_t vreinterpretq_u32_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s64)))
+uint32x4_t vreinterpretq_u32(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s8)))
+uint32x4_t vreinterpretq_u32_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_s8)))
+uint32x4_t vreinterpretq_u32(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u16)))
+uint32x4_t vreinterpretq_u32_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u16)))
+uint32x4_t vreinterpretq_u32(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u64)))
+uint32x4_t vreinterpretq_u32_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u64)))
+uint32x4_t vreinterpretq_u32(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u8)))
+uint32x4_t vreinterpretq_u32_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_u8)))
+uint32x4_t vreinterpretq_u32(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s16)))
+uint64x2_t vreinterpretq_u64_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s16)))
+uint64x2_t vreinterpretq_u64(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s32)))
+uint64x2_t vreinterpretq_u64_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s32)))
+uint64x2_t vreinterpretq_u64(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s64)))
+uint64x2_t vreinterpretq_u64_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s64)))
+uint64x2_t vreinterpretq_u64(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s8)))
+uint64x2_t vreinterpretq_u64_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_s8)))
+uint64x2_t vreinterpretq_u64(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u16)))
+uint64x2_t vreinterpretq_u64_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u16)))
+uint64x2_t vreinterpretq_u64(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u32)))
+uint64x2_t vreinterpretq_u64_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u32)))
+uint64x2_t vreinterpretq_u64(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u8)))
+uint64x2_t vreinterpretq_u64_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_u8)))
+uint64x2_t vreinterpretq_u64(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s16)))
+uint8x16_t vreinterpretq_u8_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s16)))
+uint8x16_t vreinterpretq_u8(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s32)))
+uint8x16_t vreinterpretq_u8_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s32)))
+uint8x16_t vreinterpretq_u8(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s64)))
+uint8x16_t vreinterpretq_u8_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s64)))
+uint8x16_t vreinterpretq_u8(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s8)))
+uint8x16_t vreinterpretq_u8_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_s8)))
+uint8x16_t vreinterpretq_u8(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u16)))
+uint8x16_t vreinterpretq_u8_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u16)))
+uint8x16_t vreinterpretq_u8(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u32)))
+uint8x16_t vreinterpretq_u8_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u32)))
+uint8x16_t vreinterpretq_u8(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u64)))
+uint8x16_t vreinterpretq_u8_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_u64)))
+uint8x16_t vreinterpretq_u8(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s16)))
+int16x8_t vsetq_lane_s16(int16_t, int16x8_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s16)))
+int16x8_t vsetq_lane(int16_t, int16x8_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s32)))
+int32x4_t vsetq_lane_s32(int32_t, int32x4_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s32)))
+int32x4_t vsetq_lane(int32_t, int32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s64)))
+int64x2_t vsetq_lane_s64(int64_t, int64x2_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s64)))
+int64x2_t vsetq_lane(int64_t, int64x2_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s8)))
+int8x16_t vsetq_lane_s8(int8_t, int8x16_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_s8)))
+int8x16_t vsetq_lane(int8_t, int8x16_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u16)))
+uint16x8_t vsetq_lane_u16(uint16_t, uint16x8_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u16)))
+uint16x8_t vsetq_lane(uint16_t, uint16x8_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u32)))
+uint32x4_t vsetq_lane_u32(uint32_t, uint32x4_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u32)))
+uint32x4_t vsetq_lane(uint32_t, uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u64)))
+uint64x2_t vsetq_lane_u64(uint64_t, uint64x2_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u64)))
+uint64x2_t vsetq_lane(uint64_t, uint64x2_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u8)))
+uint8x16_t vsetq_lane_u8(uint8_t, uint8x16_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_u8)))
+uint8x16_t vsetq_lane(uint8_t, uint8x16_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s16)))
+void vst1q_p_s16(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s16)))
+void vst1q_p(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s32)))
+void vst1q_p_s32(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s32)))
+void vst1q_p(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s8)))
+void vst1q_p_s8(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_s8)))
+void vst1q_p(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u16)))
+void vst1q_p_u16(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u16)))
+void vst1q_p(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u32)))
+void vst1q_p_u32(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u32)))
+void vst1q_p(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u8)))
+void vst1q_p_u8(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_u8)))
+void vst1q_p(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_s16)))
+void vst1q_s16(int16_t *, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_s16)))
+void vst1q(int16_t *, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_s32)))
+void vst1q_s32(int32_t *, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_s32)))
+void vst1q(int32_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_s8)))
+void vst1q_s8(int8_t *, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_s8)))
+void vst1q(int8_t *, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_u16)))
+void vst1q_u16(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_u16)))
+void vst1q(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_u32)))
+void vst1q_u32(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_u32)))
+void vst1q(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_u8)))
+void vst1q_u8(uint8_t *, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_u8)))
+void vst1q(uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_s16)))
+void vst2q_s16(int16_t *, int16x8x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_s16)))
+void vst2q(int16_t *, int16x8x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_s32)))
+void vst2q_s32(int32_t *, int32x4x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_s32)))
+void vst2q(int32_t *, int32x4x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_s8)))
+void vst2q_s8(int8_t *, int8x16x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_s8)))
+void vst2q(int8_t *, int8x16x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_u16)))
+void vst2q_u16(uint16_t *, uint16x8x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_u16)))
+void vst2q(uint16_t *, uint16x8x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_u32)))
+void vst2q_u32(uint32_t *, uint32x4x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_u32)))
+void vst2q(uint32_t *, uint32x4x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_u8)))
+void vst2q_u8(uint8_t *, uint8x16x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_u8)))
+void vst2q(uint8_t *, uint8x16x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_s16)))
+void vst4q_s16(int16_t *, int16x8x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_s16)))
+void vst4q(int16_t *, int16x8x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_s32)))
+void vst4q_s32(int32_t *, int32x4x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_s32)))
+void vst4q(int32_t *, int32x4x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_s8)))
+void vst4q_s8(int8_t *, int8x16x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_s8)))
+void vst4q(int8_t *, int8x16x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_u16)))
+void vst4q_u16(uint16_t *, uint16x8x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_u16)))
+void vst4q(uint16_t *, uint16x8x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_u32)))
+void vst4q_u32(uint32_t *, uint32x4x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_u32)))
+void vst4q(uint32_t *, uint32x4x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_u8)))
+void vst4q_u8(uint8_t *, uint8x16x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_u8)))
+void vst4q(uint8_t *, uint8x16x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s16)))
+void vstrbq_p_s16(int8_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s16)))
+void vstrbq_p(int8_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s32)))
+void vstrbq_p_s32(int8_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s32)))
+void vstrbq_p(int8_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s8)))
+void vstrbq_p_s8(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_s8)))
+void vstrbq_p(int8_t *, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u16)))
+void vstrbq_p_u16(uint8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u16)))
+void vstrbq_p(uint8_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u32)))
+void vstrbq_p_u32(uint8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u32)))
+void vstrbq_p(uint8_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u8)))
+void vstrbq_p_u8(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_p_u8)))
+void vstrbq_p(uint8_t *, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s16)))
+void vstrbq_s16(int8_t *, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s16)))
+void vstrbq(int8_t *, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s32)))
+void vstrbq_s32(int8_t *, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s32)))
+void vstrbq(int8_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s8)))
+void vstrbq_s8(int8_t *, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_s8)))
+void vstrbq(int8_t *, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16)))
+void vstrbq_scatter_offset_p_s16(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s16)))
+void vstrbq_scatter_offset_p(int8_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32)))
+void vstrbq_scatter_offset_p_s32(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s32)))
+void vstrbq_scatter_offset_p(int8_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8)))
+void vstrbq_scatter_offset_p_s8(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_s8)))
+void vstrbq_scatter_offset_p(int8_t *, uint8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16)))
+void vstrbq_scatter_offset_p_u16(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u16)))
+void vstrbq_scatter_offset_p(uint8_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32)))
+void vstrbq_scatter_offset_p_u32(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u32)))
+void vstrbq_scatter_offset_p(uint8_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8)))
+void vstrbq_scatter_offset_p_u8(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_p_u8)))
+void vstrbq_scatter_offset_p(uint8_t *, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16)))
+void vstrbq_scatter_offset_s16(int8_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s16)))
+void vstrbq_scatter_offset(int8_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32)))
+void vstrbq_scatter_offset_s32(int8_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s32)))
+void vstrbq_scatter_offset(int8_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8)))
+void vstrbq_scatter_offset_s8(int8_t *, uint8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_s8)))
+void vstrbq_scatter_offset(int8_t *, uint8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16)))
+void vstrbq_scatter_offset_u16(uint8_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u16)))
+void vstrbq_scatter_offset(uint8_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32)))
+void vstrbq_scatter_offset_u32(uint8_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u32)))
+void vstrbq_scatter_offset(uint8_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8)))
+void vstrbq_scatter_offset_u8(uint8_t *, uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_scatter_offset_u8)))
+void vstrbq_scatter_offset(uint8_t *, uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u16)))
+void vstrbq_u16(uint8_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u16)))
+void vstrbq(uint8_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u32)))
+void vstrbq_u32(uint8_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u32)))
+void vstrbq(uint8_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u8)))
+void vstrbq_u8(uint8_t *, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrbq_u8)))
+void vstrbq(uint8_t *, uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64)))
+void vstrdq_scatter_base_p_s64(uint64x2_t, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_p_s64)))
+void vstrdq_scatter_base_p(uint64x2_t, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64)))
+void vstrdq_scatter_base_p_u64(uint64x2_t, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_p_u64)))
+void vstrdq_scatter_base_p(uint64x2_t, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_s64)))
+void vstrdq_scatter_base_s64(uint64x2_t, int, int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_s64)))
+void vstrdq_scatter_base(uint64x2_t, int, int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_u64)))
+void vstrdq_scatter_base_u64(uint64x2_t, int, uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_u64)))
+void vstrdq_scatter_base(uint64x2_t, int, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64)))
+void vstrdq_scatter_base_wb_p_s64(uint64x2_t *, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_s64)))
+void vstrdq_scatter_base_wb_p(uint64x2_t *, int, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64)))
+void vstrdq_scatter_base_wb_p_u64(uint64x2_t *, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_p_u64)))
+void vstrdq_scatter_base_wb_p(uint64x2_t *, int, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64)))
+void vstrdq_scatter_base_wb_s64(uint64x2_t *, int, int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_s64)))
+void vstrdq_scatter_base_wb(uint64x2_t *, int, int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64)))
+void vstrdq_scatter_base_wb_u64(uint64x2_t *, int, uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_base_wb_u64)))
+void vstrdq_scatter_base_wb(uint64x2_t *, int, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64)))
+void vstrdq_scatter_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_s64)))
+void vstrdq_scatter_offset_p(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64)))
+void vstrdq_scatter_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_p_u64)))
+void vstrdq_scatter_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64)))
+void vstrdq_scatter_offset_s64(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_s64)))
+void vstrdq_scatter_offset(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64)))
+void vstrdq_scatter_offset_u64(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_offset_u64)))
+void vstrdq_scatter_offset(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64)))
+void vstrdq_scatter_shifted_offset_p_s64(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_s64)))
+void vstrdq_scatter_shifted_offset_p(int64_t *, uint64x2_t, int64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64)))
+void vstrdq_scatter_shifted_offset_p_u64(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_p_u64)))
+void vstrdq_scatter_shifted_offset_p(uint64_t *, uint64x2_t, uint64x2_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64)))
+void vstrdq_scatter_shifted_offset_s64(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_s64)))
+void vstrdq_scatter_shifted_offset(int64_t *, uint64x2_t, int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64)))
+void vstrdq_scatter_shifted_offset_u64(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrdq_scatter_shifted_offset_u64)))
+void vstrdq_scatter_shifted_offset(uint64_t *, uint64x2_t, uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_s16)))
+void vstrhq_p_s16(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_s16)))
+void vstrhq_p(int16_t *, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_s32)))
+void vstrhq_p_s32(int16_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_s32)))
+void vstrhq_p(int16_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_u16)))
+void vstrhq_p_u16(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_u16)))
+void vstrhq_p(uint16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_u32)))
+void vstrhq_p_u32(uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_u32)))
+void vstrhq_p(uint16_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_s16)))
+void vstrhq_s16(int16_t *, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_s16)))
+void vstrhq(int16_t *, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_s32)))
+void vstrhq_s32(int16_t *, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_s32)))
+void vstrhq(int16_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16)))
+void vstrhq_scatter_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s16)))
+void vstrhq_scatter_offset_p(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32)))
+void vstrhq_scatter_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_s32)))
+void vstrhq_scatter_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16)))
+void vstrhq_scatter_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u16)))
+void vstrhq_scatter_offset_p(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32)))
+void vstrhq_scatter_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_u32)))
+void vstrhq_scatter_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16)))
+void vstrhq_scatter_offset_s16(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_s16)))
+void vstrhq_scatter_offset(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32)))
+void vstrhq_scatter_offset_s32(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_s32)))
+void vstrhq_scatter_offset(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16)))
+void vstrhq_scatter_offset_u16(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_u16)))
+void vstrhq_scatter_offset(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32)))
+void vstrhq_scatter_offset_u32(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_u32)))
+void vstrhq_scatter_offset(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16)))
+void vstrhq_scatter_shifted_offset_p_s16(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s16)))
+void vstrhq_scatter_shifted_offset_p(int16_t *, uint16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32)))
+void vstrhq_scatter_shifted_offset_p_s32(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_s32)))
+void vstrhq_scatter_shifted_offset_p(int16_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16)))
+void vstrhq_scatter_shifted_offset_p_u16(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u16)))
+void vstrhq_scatter_shifted_offset_p(uint16_t *, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32)))
+void vstrhq_scatter_shifted_offset_p_u32(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_u32)))
+void vstrhq_scatter_shifted_offset_p(uint16_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16)))
+void vstrhq_scatter_shifted_offset_s16(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s16)))
+void vstrhq_scatter_shifted_offset(int16_t *, uint16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32)))
+void vstrhq_scatter_shifted_offset_s32(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_s32)))
+void vstrhq_scatter_shifted_offset(int16_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16)))
+void vstrhq_scatter_shifted_offset_u16(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u16)))
+void vstrhq_scatter_shifted_offset(uint16_t *, uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32)))
+void vstrhq_scatter_shifted_offset_u32(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_u32)))
+void vstrhq_scatter_shifted_offset(uint16_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_u16)))
+void vstrhq_u16(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_u16)))
+void vstrhq(uint16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_u32)))
+void vstrhq_u32(uint16_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_u32)))
+void vstrhq(uint16_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_s32)))
+void vstrwq_p_s32(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_s32)))
+void vstrwq_p(int32_t *, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_u32)))
+void vstrwq_p_u32(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_u32)))
+void vstrwq_p(uint32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_s32)))
+void vstrwq_s32(int32_t *, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_s32)))
+void vstrwq(int32_t *, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32)))
+void vstrwq_scatter_base_p_s32(uint32x4_t, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_s32)))
+void vstrwq_scatter_base_p(uint32x4_t, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32)))
+void vstrwq_scatter_base_p_u32(uint32x4_t, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_u32)))
+void vstrwq_scatter_base_p(uint32x4_t, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_s32)))
+void vstrwq_scatter_base_s32(uint32x4_t, int, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_s32)))
+void vstrwq_scatter_base(uint32x4_t, int, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_u32)))
+void vstrwq_scatter_base_u32(uint32x4_t, int, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_u32)))
+void vstrwq_scatter_base(uint32x4_t, int, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32)))
+void vstrwq_scatter_base_wb_p_s32(uint32x4_t *, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_s32)))
+void vstrwq_scatter_base_wb_p(uint32x4_t *, int, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32)))
+void vstrwq_scatter_base_wb_p_u32(uint32x4_t *, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_u32)))
+void vstrwq_scatter_base_wb_p(uint32x4_t *, int, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32)))
+void vstrwq_scatter_base_wb_s32(uint32x4_t *, int, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_s32)))
+void vstrwq_scatter_base_wb(uint32x4_t *, int, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32)))
+void vstrwq_scatter_base_wb_u32(uint32x4_t *, int, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_u32)))
+void vstrwq_scatter_base_wb(uint32x4_t *, int, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32)))
+void vstrwq_scatter_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_s32)))
+void vstrwq_scatter_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32)))
+void vstrwq_scatter_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_u32)))
+void vstrwq_scatter_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32)))
+void vstrwq_scatter_offset_s32(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_s32)))
+void vstrwq_scatter_offset(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32)))
+void vstrwq_scatter_offset_u32(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_u32)))
+void vstrwq_scatter_offset(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32)))
+void vstrwq_scatter_shifted_offset_p_s32(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_s32)))
+void vstrwq_scatter_shifted_offset_p(int32_t *, uint32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32)))
+void vstrwq_scatter_shifted_offset_p_u32(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_u32)))
+void vstrwq_scatter_shifted_offset_p(uint32_t *, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32)))
+void vstrwq_scatter_shifted_offset_s32(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_s32)))
+void vstrwq_scatter_shifted_offset(int32_t *, uint32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32)))
+void vstrwq_scatter_shifted_offset_u32(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_u32)))
+void vstrwq_scatter_shifted_offset(uint32_t *, uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_u32)))
+void vstrwq_u32(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_u32)))
+void vstrwq(uint32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s16)))
+int16x8_t vsubq_m_s16(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s16)))
+int16x8_t vsubq_m(int16x8_t, int16x8_t, int16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s32)))
+int32x4_t vsubq_m_s32(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s32)))
+int32x4_t vsubq_m(int32x4_t, int32x4_t, int32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s8)))
+int8x16_t vsubq_m_s8(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_s8)))
+int8x16_t vsubq_m(int8x16_t, int8x16_t, int8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u16)))
+uint16x8_t vsubq_m_u16(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u16)))
+uint16x8_t vsubq_m(uint16x8_t, uint16x8_t, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u32)))
+uint32x4_t vsubq_m_u32(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u32)))
+uint32x4_t vsubq_m(uint32x4_t, uint32x4_t, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u8)))
+uint8x16_t vsubq_m_u8(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_u8)))
+uint8x16_t vsubq_m(uint8x16_t, uint8x16_t, uint8x16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_s16)))
+int16x8_t vsubq_s16(int16x8_t, int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_s16)))
+int16x8_t vsubq(int16x8_t, int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_s32)))
+int32x4_t vsubq_s32(int32x4_t, int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_s32)))
+int32x4_t vsubq(int32x4_t, int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_s8)))
+int8x16_t vsubq_s8(int8x16_t, int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_s8)))
+int8x16_t vsubq(int8x16_t, int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_u16)))
+uint16x8_t vsubq_u16(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_u16)))
+uint16x8_t vsubq(uint16x8_t, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_u32)))
+uint32x4_t vsubq_u32(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_u32)))
+uint32x4_t vsubq(uint32x4_t, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_u8)))
+uint8x16_t vsubq_u8(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_u8)))
+uint8x16_t vsubq(uint8x16_t, uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s16)))
+int16x8_t vuninitializedq(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s32)))
+int32x4_t vuninitializedq(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s64)))
+int64x2_t vuninitializedq(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_s8)))
+int8x16_t vuninitializedq(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u16)))
+uint16x8_t vuninitializedq(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u32)))
+uint32x4_t vuninitializedq(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u64)))
+uint64x2_t vuninitializedq(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_u8)))
+uint8x16_t vuninitializedq(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_s16)))
+int16x8_t vuninitializedq_s16();
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_s32)))
+int32x4_t vuninitializedq_s32();
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_s64)))
+int64x2_t vuninitializedq_s64();
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_s8)))
+int8x16_t vuninitializedq_s8();
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_u16)))
+uint16x8_t vuninitializedq_u16();
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_u32)))
+uint32x4_t vuninitializedq_u32();
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_u64)))
+uint64x2_t vuninitializedq_u64();
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_u8)))
+uint8x16_t vuninitializedq_u8();
+
+#endif /* (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE) */
+
+#if (__ARM_FEATURE_MVE & 2) && (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE)
+
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_f16)))
+float16x8_t vaddq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_f16)))
+float16x8_t vaddq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_f32)))
+float32x4_t vaddq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_f32)))
+float32x4_t vaddq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_f16)))
+float16x8_t vaddq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_f16)))
+float16x8_t vaddq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_f32)))
+float32x4_t vaddq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vaddq_m_f32)))
+float32x4_t vaddq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_f16)))
+mve_pred16_t vcmpeqq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_f16)))
+mve_pred16_t vcmpeqq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_f32)))
+mve_pred16_t vcmpeqq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_f32)))
+mve_pred16_t vcmpeqq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_f16)))
+mve_pred16_t vcmpeqq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_f16)))
+mve_pred16_t vcmpeqq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_f32)))
+mve_pred16_t vcmpeqq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_f32)))
+mve_pred16_t vcmpeqq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_f16)))
+mve_pred16_t vcmpeqq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_f16)))
+mve_pred16_t vcmpeqq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_f32)))
+mve_pred16_t vcmpeqq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_m_n_f32)))
+mve_pred16_t vcmpeqq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_f16)))
+mve_pred16_t vcmpeqq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_f16)))
+mve_pred16_t vcmpeqq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_f32)))
+mve_pred16_t vcmpeqq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpeqq_n_f32)))
+mve_pred16_t vcmpeqq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_f16)))
+mve_pred16_t vcmpgeq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_f16)))
+mve_pred16_t vcmpgeq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_f32)))
+mve_pred16_t vcmpgeq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_f32)))
+mve_pred16_t vcmpgeq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_f16)))
+mve_pred16_t vcmpgeq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_f16)))
+mve_pred16_t vcmpgeq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_f32)))
+mve_pred16_t vcmpgeq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_f32)))
+mve_pred16_t vcmpgeq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_f16)))
+mve_pred16_t vcmpgeq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_f16)))
+mve_pred16_t vcmpgeq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_f32)))
+mve_pred16_t vcmpgeq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_m_n_f32)))
+mve_pred16_t vcmpgeq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_f16)))
+mve_pred16_t vcmpgeq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_f16)))
+mve_pred16_t vcmpgeq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_f32)))
+mve_pred16_t vcmpgeq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgeq_n_f32)))
+mve_pred16_t vcmpgeq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_f16)))
+mve_pred16_t vcmpgtq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_f16)))
+mve_pred16_t vcmpgtq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_f32)))
+mve_pred16_t vcmpgtq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_f32)))
+mve_pred16_t vcmpgtq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_f16)))
+mve_pred16_t vcmpgtq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_f16)))
+mve_pred16_t vcmpgtq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_f32)))
+mve_pred16_t vcmpgtq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_f32)))
+mve_pred16_t vcmpgtq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_f16)))
+mve_pred16_t vcmpgtq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_f16)))
+mve_pred16_t vcmpgtq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_f32)))
+mve_pred16_t vcmpgtq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_m_n_f32)))
+mve_pred16_t vcmpgtq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_f16)))
+mve_pred16_t vcmpgtq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_f16)))
+mve_pred16_t vcmpgtq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_f32)))
+mve_pred16_t vcmpgtq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpgtq_n_f32)))
+mve_pred16_t vcmpgtq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_f16)))
+mve_pred16_t vcmpleq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_f16)))
+mve_pred16_t vcmpleq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_f32)))
+mve_pred16_t vcmpleq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_f32)))
+mve_pred16_t vcmpleq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_f16)))
+mve_pred16_t vcmpleq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_f16)))
+mve_pred16_t vcmpleq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_f32)))
+mve_pred16_t vcmpleq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_f32)))
+mve_pred16_t vcmpleq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_f16)))
+mve_pred16_t vcmpleq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_f16)))
+mve_pred16_t vcmpleq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_f32)))
+mve_pred16_t vcmpleq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_m_n_f32)))
+mve_pred16_t vcmpleq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_f16)))
+mve_pred16_t vcmpleq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_f16)))
+mve_pred16_t vcmpleq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_f32)))
+mve_pred16_t vcmpleq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpleq_n_f32)))
+mve_pred16_t vcmpleq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_f16)))
+mve_pred16_t vcmpltq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_f16)))
+mve_pred16_t vcmpltq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_f32)))
+mve_pred16_t vcmpltq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_f32)))
+mve_pred16_t vcmpltq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_f16)))
+mve_pred16_t vcmpltq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_f16)))
+mve_pred16_t vcmpltq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_f32)))
+mve_pred16_t vcmpltq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_f32)))
+mve_pred16_t vcmpltq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_f16)))
+mve_pred16_t vcmpltq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_f16)))
+mve_pred16_t vcmpltq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_f32)))
+mve_pred16_t vcmpltq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_m_n_f32)))
+mve_pred16_t vcmpltq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_f16)))
+mve_pred16_t vcmpltq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_f16)))
+mve_pred16_t vcmpltq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_f32)))
+mve_pred16_t vcmpltq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpltq_n_f32)))
+mve_pred16_t vcmpltq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_f16)))
+mve_pred16_t vcmpneq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_f16)))
+mve_pred16_t vcmpneq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_f32)))
+mve_pred16_t vcmpneq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_f32)))
+mve_pred16_t vcmpneq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_f16)))
+mve_pred16_t vcmpneq_m_f16(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_f16)))
+mve_pred16_t vcmpneq_m(float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_f32)))
+mve_pred16_t vcmpneq_m_f32(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_f32)))
+mve_pred16_t vcmpneq_m(float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_f16)))
+mve_pred16_t vcmpneq_m_n_f16(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_f16)))
+mve_pred16_t vcmpneq_m(float16x8_t, float16_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_f32)))
+mve_pred16_t vcmpneq_m_n_f32(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_m_n_f32)))
+mve_pred16_t vcmpneq_m(float32x4_t, float32_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_f16)))
+mve_pred16_t vcmpneq_n_f16(float16x8_t, float16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_f16)))
+mve_pred16_t vcmpneq(float16x8_t, float16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_f32)))
+mve_pred16_t vcmpneq_n_f32(float32x4_t, float32_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vcmpneq_n_f32)))
+mve_pred16_t vcmpneq(float32x4_t, float32_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_f16)))
+float16x8_t vcreateq_f16(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcreateq_f32)))
+float32x4_t vcreateq_f32(uint64_t, uint64_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtbq_f16_f32)))
+float16x8_t vcvtbq_f16_f32(float16x8_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvtbq_m_f16_f32)))
+float16x8_t vcvtbq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvttq_f16_f32)))
+float16x8_t vcvttq_f16_f32(float16x8_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vcvttq_m_f16_f32)))
+float16x8_t vcvttq_m_f16_f32(float16x8_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_f16)))
+float16_t vgetq_lane_f16(float16x8_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_f16)))
+float16_t vgetq_lane(float16x8_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_f32)))
+float32_t vgetq_lane_f32(float32x4_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vgetq_lane_f32)))
+float32_t vgetq_lane(float32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_f16)))
+float16x8_t vld1q_f16(const float16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_f16)))
+float16x8_t vld1q(const float16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_f32)))
+float32x4_t vld1q_f32(const float32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_f32)))
+float32x4_t vld1q(const float32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_f16)))
+float16x8_t vld1q_z_f16(const float16_t *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_f16)))
+float16x8_t vld1q_z(const float16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_f32)))
+float32x4_t vld1q_z_f32(const float32_t *, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld1q_z_f32)))
+float32x4_t vld1q_z(const float32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_f16)))
+float16x8x2_t vld2q_f16(const float16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_f16)))
+float16x8x2_t vld2q(const float16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld2q_f32)))
+float32x4x2_t vld2q_f32(const float32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld2q_f32)))
+float32x4x2_t vld2q(const float32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_f16)))
+float16x8x4_t vld4q_f16(const float16_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_f16)))
+float16x8x4_t vld4q(const float16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vld4q_f32)))
+float32x4x4_t vld4q_f32(const float32_t *);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vld4q_f32)))
+float32x4x4_t vld4q(const float32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_f16)))
+float16x8_t vldrhq_f16(const float16_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_f16)))
+float16x8_t vldrhq_gather_offset_f16(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_f16)))
+float16x8_t vldrhq_gather_offset(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16)))
+float16x8_t vldrhq_gather_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_offset_z_f16)))
+float16x8_t vldrhq_gather_offset_z(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16)))
+float16x8_t vldrhq_gather_shifted_offset_f16(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_f16)))
+float16x8_t vldrhq_gather_shifted_offset(const float16_t *, uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16)))
+float16x8_t vldrhq_gather_shifted_offset_z_f16(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrhq_gather_shifted_offset_z_f16)))
+float16x8_t vldrhq_gather_shifted_offset_z(const float16_t *, uint16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrhq_z_f16)))
+float16x8_t vldrhq_z_f16(const float16_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_f32)))
+float32x4_t vldrwq_f32(const float32_t *);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_f32)))
+float32x4_t vldrwq_gather_base_f32(uint32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_f32)))
+float32x4_t vldrwq_gather_base_wb_f32(uint32x4_t *, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_wb_z_f32)))
+float32x4_t vldrwq_gather_base_wb_z_f32(uint32x4_t *, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_base_z_f32)))
+float32x4_t vldrwq_gather_base_z_f32(uint32x4_t, int, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_f32)))
+float32x4_t vldrwq_gather_offset_f32(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_f32)))
+float32x4_t vldrwq_gather_offset(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32)))
+float32x4_t vldrwq_gather_offset_z_f32(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_offset_z_f32)))
+float32x4_t vldrwq_gather_offset_z(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32)))
+float32x4_t vldrwq_gather_shifted_offset_f32(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_f32)))
+float32x4_t vldrwq_gather_shifted_offset(const float32_t *, uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32)))
+float32x4_t vldrwq_gather_shifted_offset_z_f32(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vldrwq_gather_shifted_offset_z_f32)))
+float32x4_t vldrwq_gather_shifted_offset_z(const float32_t *, uint32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vldrwq_z_f32)))
+float32x4_t vldrwq_z_f32(const float32_t *, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_f32)))
+float16x8_t vreinterpretq_f16_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_f32)))
+float16x8_t vreinterpretq_f16(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s16)))
+float16x8_t vreinterpretq_f16_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s16)))
+float16x8_t vreinterpretq_f16(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s32)))
+float16x8_t vreinterpretq_f16_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s32)))
+float16x8_t vreinterpretq_f16(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s64)))
+float16x8_t vreinterpretq_f16_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s64)))
+float16x8_t vreinterpretq_f16(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s8)))
+float16x8_t vreinterpretq_f16_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_s8)))
+float16x8_t vreinterpretq_f16(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u16)))
+float16x8_t vreinterpretq_f16_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u16)))
+float16x8_t vreinterpretq_f16(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u32)))
+float16x8_t vreinterpretq_f16_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u32)))
+float16x8_t vreinterpretq_f16(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u64)))
+float16x8_t vreinterpretq_f16_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u64)))
+float16x8_t vreinterpretq_f16(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u8)))
+float16x8_t vreinterpretq_f16_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f16_u8)))
+float16x8_t vreinterpretq_f16(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_f16)))
+float32x4_t vreinterpretq_f32_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_f16)))
+float32x4_t vreinterpretq_f32(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s16)))
+float32x4_t vreinterpretq_f32_s16(int16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s16)))
+float32x4_t vreinterpretq_f32(int16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s32)))
+float32x4_t vreinterpretq_f32_s32(int32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s32)))
+float32x4_t vreinterpretq_f32(int32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s64)))
+float32x4_t vreinterpretq_f32_s64(int64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s64)))
+float32x4_t vreinterpretq_f32(int64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s8)))
+float32x4_t vreinterpretq_f32_s8(int8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_s8)))
+float32x4_t vreinterpretq_f32(int8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u16)))
+float32x4_t vreinterpretq_f32_u16(uint16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u16)))
+float32x4_t vreinterpretq_f32(uint16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u32)))
+float32x4_t vreinterpretq_f32_u32(uint32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u32)))
+float32x4_t vreinterpretq_f32(uint32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u64)))
+float32x4_t vreinterpretq_f32_u64(uint64x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u64)))
+float32x4_t vreinterpretq_f32(uint64x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u8)))
+float32x4_t vreinterpretq_f32_u8(uint8x16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_f32_u8)))
+float32x4_t vreinterpretq_f32(uint8x16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_f16)))
+int16x8_t vreinterpretq_s16_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_f16)))
+int16x8_t vreinterpretq_s16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_f32)))
+int16x8_t vreinterpretq_s16_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s16_f32)))
+int16x8_t vreinterpretq_s16(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_f16)))
+int32x4_t vreinterpretq_s32_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_f16)))
+int32x4_t vreinterpretq_s32(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_f32)))
+int32x4_t vreinterpretq_s32_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s32_f32)))
+int32x4_t vreinterpretq_s32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_f16)))
+int64x2_t vreinterpretq_s64_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_f16)))
+int64x2_t vreinterpretq_s64(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_f32)))
+int64x2_t vreinterpretq_s64_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s64_f32)))
+int64x2_t vreinterpretq_s64(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_f16)))
+int8x16_t vreinterpretq_s8_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_f16)))
+int8x16_t vreinterpretq_s8(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_f32)))
+int8x16_t vreinterpretq_s8_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_s8_f32)))
+int8x16_t vreinterpretq_s8(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_f16)))
+uint16x8_t vreinterpretq_u16_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_f16)))
+uint16x8_t vreinterpretq_u16(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_f32)))
+uint16x8_t vreinterpretq_u16_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u16_f32)))
+uint16x8_t vreinterpretq_u16(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_f16)))
+uint32x4_t vreinterpretq_u32_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_f16)))
+uint32x4_t vreinterpretq_u32(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_f32)))
+uint32x4_t vreinterpretq_u32_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u32_f32)))
+uint32x4_t vreinterpretq_u32(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_f16)))
+uint64x2_t vreinterpretq_u64_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_f16)))
+uint64x2_t vreinterpretq_u64(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_f32)))
+uint64x2_t vreinterpretq_u64_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u64_f32)))
+uint64x2_t vreinterpretq_u64(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_f16)))
+uint8x16_t vreinterpretq_u8_f16(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_f16)))
+uint8x16_t vreinterpretq_u8(float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_f32)))
+uint8x16_t vreinterpretq_u8_f32(float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vreinterpretq_u8_f32)))
+uint8x16_t vreinterpretq_u8(float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_f16)))
+float16x8_t vsetq_lane_f16(float16_t, float16x8_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_f16)))
+float16x8_t vsetq_lane(float16_t, float16x8_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_f32)))
+float32x4_t vsetq_lane_f32(float32_t, float32x4_t, int);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsetq_lane_f32)))
+float32x4_t vsetq_lane(float32_t, float32x4_t, int);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_f16)))
+void vst1q_f16(float16_t *, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_f16)))
+void vst1q(float16_t *, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_f32)))
+void vst1q_f32(float32_t *, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_f32)))
+void vst1q(float32_t *, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_f16)))
+void vst1q_p_f16(float16_t *, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_f16)))
+void vst1q_p(float16_t *, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_f32)))
+void vst1q_p_f32(float32_t *, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst1q_p_f32)))
+void vst1q_p(float32_t *, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_f16)))
+void vst2q_f16(float16_t *, float16x8x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_f16)))
+void vst2q(float16_t *, float16x8x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst2q_f32)))
+void vst2q_f32(float32_t *, float32x4x2_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst2q_f32)))
+void vst2q(float32_t *, float32x4x2_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_f16)))
+void vst4q_f16(float16_t *, float16x8x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_f16)))
+void vst4q(float16_t *, float16x8x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vst4q_f32)))
+void vst4q_f32(float32_t *, float32x4x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vst4q_f32)))
+void vst4q(float32_t *, float32x4x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_f16)))
+void vstrhq_f16(float16_t *, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_f16)))
+void vstrhq(float16_t *, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_f16)))
+void vstrhq_p_f16(float16_t *, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_p_f16)))
+void vstrhq_p(float16_t *, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16)))
+void vstrhq_scatter_offset_f16(float16_t *, uint16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_f16)))
+void vstrhq_scatter_offset(float16_t *, uint16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16)))
+void vstrhq_scatter_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_offset_p_f16)))
+void vstrhq_scatter_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16)))
+void vstrhq_scatter_shifted_offset_f16(float16_t *, uint16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_f16)))
+void vstrhq_scatter_shifted_offset(float16_t *, uint16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16)))
+void vstrhq_scatter_shifted_offset_p_f16(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrhq_scatter_shifted_offset_p_f16)))
+void vstrhq_scatter_shifted_offset_p(float16_t *, uint16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_f32)))
+void vstrwq_f32(float32_t *, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_f32)))
+void vstrwq(float32_t *, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_f32)))
+void vstrwq_p_f32(float32_t *, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_p_f32)))
+void vstrwq_p(float32_t *, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_f32)))
+void vstrwq_scatter_base_f32(uint32x4_t, int, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_f32)))
+void vstrwq_scatter_base(uint32x4_t, int, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32)))
+void vstrwq_scatter_base_p_f32(uint32x4_t, int, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_p_f32)))
+void vstrwq_scatter_base_p(uint32x4_t, int, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32)))
+void vstrwq_scatter_base_wb_f32(uint32x4_t *, int, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_f32)))
+void vstrwq_scatter_base_wb(uint32x4_t *, int, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32)))
+void vstrwq_scatter_base_wb_p_f32(uint32x4_t *, int, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_base_wb_p_f32)))
+void vstrwq_scatter_base_wb_p(uint32x4_t *, int, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32)))
+void vstrwq_scatter_offset_f32(float32_t *, uint32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_f32)))
+void vstrwq_scatter_offset(float32_t *, uint32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32)))
+void vstrwq_scatter_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_offset_p_f32)))
+void vstrwq_scatter_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32)))
+void vstrwq_scatter_shifted_offset_f32(float32_t *, uint32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_f32)))
+void vstrwq_scatter_shifted_offset(float32_t *, uint32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32)))
+void vstrwq_scatter_shifted_offset_p_f32(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vstrwq_scatter_shifted_offset_p_f32)))
+void vstrwq_scatter_shifted_offset_p(float32_t *, uint32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_f16)))
+float16x8_t vsubq_f16(float16x8_t, float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_f16)))
+float16x8_t vsubq(float16x8_t, float16x8_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_f32)))
+float32x4_t vsubq_f32(float32x4_t, float32x4_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_f32)))
+float32x4_t vsubq(float32x4_t, float32x4_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_f16)))
+float16x8_t vsubq_m_f16(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_f16)))
+float16x8_t vsubq_m(float16x8_t, float16x8_t, float16x8_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_f32)))
+float32x4_t vsubq_m_f32(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vsubq_m_f32)))
+float32x4_t vsubq_m(float32x4_t, float32x4_t, float32x4_t, mve_pred16_t);
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_f16)))
+float16x8_t vuninitializedq_f16();
+static __inline__ __attribute__((__clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_f32)))
+float32x4_t vuninitializedq_f32();
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f16)))
+float16x8_t vuninitializedq(float16x8_t);
+static __inline__ __attribute__((overloadable, __clang_arm_mve_alias(__builtin_arm_mve_vuninitializedq_polymorphic_f32)))
+float32x4_t vuninitializedq(float32x4_t);
+
+#endif /* (__ARM_FEATURE_MVE & 2) && (!defined __ARM_MVE_PRESERVE_USER_NAMESPACE) */
+
+#endif /* __ARM_MVE_H */
diff --git a/darwin-x86/lib64/clang/10.0.1/include/arm_neon.h b/linux-x86/lib64/clang/10.0.5/include/arm_neon.h
similarity index 97%
copy from darwin-x86/lib64/clang/10.0.1/include/arm_neon.h
copy to linux-x86/lib64/clang/10.0.5/include/arm_neon.h
index 6b55c4b..3c59d59 100644
--- a/darwin-x86/lib64/clang/10.0.1/include/arm_neon.h
+++ b/linux-x86/lib64/clang/10.0.5/include/arm_neon.h
@@ -6086,7 +6086,7 @@
 #define vget_lane_p8(__p0, __p1) __extension__ ({ \
   poly8x8_t __s0 = __p0; \
   poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
+  __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -6094,13 +6094,13 @@
   poly8x8_t __s0 = __p0; \
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
+  __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vget_lane_p8(__p0, __p1) __extension__ ({ \
   poly8x8_t __s0 = __p0; \
   poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
+  __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -6109,7 +6109,7 @@
 #define vget_lane_p16(__p0, __p1) __extension__ ({ \
   poly16x4_t __s0 = __p0; \
   poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
+  __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -6117,13 +6117,13 @@
   poly16x4_t __s0 = __p0; \
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \
+  __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vget_lane_p16(__p0, __p1) __extension__ ({ \
   poly16x4_t __s0 = __p0; \
   poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
+  __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -6132,7 +6132,7 @@
 #define vgetq_lane_p8(__p0, __p1) __extension__ ({ \
   poly8x16_t __s0 = __p0; \
   poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
+  __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -6140,13 +6140,13 @@
   poly8x16_t __s0 = __p0; \
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
+  __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_p8(__p0, __p1) __extension__ ({ \
   poly8x16_t __s0 = __p0; \
   poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
+  __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -6155,7 +6155,7 @@
 #define vgetq_lane_p16(__p0, __p1) __extension__ ({ \
   poly16x8_t __s0 = __p0; \
   poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
+  __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -6163,13 +6163,13 @@
   poly16x8_t __s0 = __p0; \
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \
+  __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_p16(__p0, __p1) __extension__ ({ \
   poly16x8_t __s0 = __p0; \
   poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
+  __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -6201,7 +6201,7 @@
 #define vgetq_lane_u32(__p0, __p1) __extension__ ({ \
   uint32x4_t __s0 = __p0; \
   uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
+  __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -6209,13 +6209,13 @@
   uint32x4_t __s0 = __p0; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__rev0, __p1); \
+  __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_u32(__p0, __p1) __extension__ ({ \
   uint32x4_t __s0 = __p0; \
   uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
+  __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -6224,7 +6224,7 @@
 #define vgetq_lane_u64(__p0, __p1) __extension__ ({ \
   uint64x2_t __s0 = __p0; \
   uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
+  __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -6232,13 +6232,13 @@
   uint64x2_t __s0 = __p0; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__rev0, __p1); \
+  __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_u64(__p0, __p1) __extension__ ({ \
   uint64x2_t __s0 = __p0; \
   uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
+  __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -6247,7 +6247,7 @@
 #define vgetq_lane_u16(__p0, __p1) __extension__ ({ \
   uint16x8_t __s0 = __p0; \
   uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
+  __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -6255,13 +6255,13 @@
   uint16x8_t __s0 = __p0; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \
+  __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_u16(__p0, __p1) __extension__ ({ \
   uint16x8_t __s0 = __p0; \
   uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
+  __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -6293,7 +6293,7 @@
 #define vgetq_lane_f32(__p0, __p1) __extension__ ({ \
   float32x4_t __s0 = __p0; \
   float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vgetq_lane_f32((int8x16_t)__s0, __p1); \
+  __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -6301,13 +6301,13 @@
   float32x4_t __s0 = __p0; \
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vgetq_lane_f32((int8x16_t)__rev0, __p1); \
+  __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_f32(__p0, __p1) __extension__ ({ \
   float32x4_t __s0 = __p0; \
   float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vgetq_lane_f32((int8x16_t)__s0, __p1); \
+  __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -6316,7 +6316,7 @@
 #define vgetq_lane_s32(__p0, __p1) __extension__ ({ \
   int32x4_t __s0 = __p0; \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
+  __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -6324,13 +6324,13 @@
   int32x4_t __s0 = __p0; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__rev0, __p1); \
+  __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_s32(__p0, __p1) __extension__ ({ \
   int32x4_t __s0 = __p0; \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
+  __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -6339,7 +6339,7 @@
 #define vgetq_lane_s64(__p0, __p1) __extension__ ({ \
   int64x2_t __s0 = __p0; \
   int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
+  __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -6347,13 +6347,13 @@
   int64x2_t __s0 = __p0; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__rev0, __p1); \
+  __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_s64(__p0, __p1) __extension__ ({ \
   int64x2_t __s0 = __p0; \
   int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
+  __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -6362,7 +6362,7 @@
 #define vgetq_lane_s16(__p0, __p1) __extension__ ({ \
   int16x8_t __s0 = __p0; \
   int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
+  __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -6370,13 +6370,13 @@
   int16x8_t __s0 = __p0; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \
+  __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_s16(__p0, __p1) __extension__ ({ \
   int16x8_t __s0 = __p0; \
   int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
+  __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -6408,7 +6408,7 @@
 #define vget_lane_u32(__p0, __p1) __extension__ ({ \
   uint32x2_t __s0 = __p0; \
   uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
+  __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -6416,13 +6416,13 @@
   uint32x2_t __s0 = __p0; \
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vget_lane_i32((int8x8_t)__rev0, __p1); \
+  __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vget_lane_u32(__p0, __p1) __extension__ ({ \
   uint32x2_t __s0 = __p0; \
   uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
+  __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -6430,14 +6430,14 @@
 #define vget_lane_u64(__p0, __p1) __extension__ ({ \
   uint64x1_t __s0 = __p0; \
   uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
+  __ret = (uint64_t) __builtin_neon_vget_lane_i64((int64x1_t)__s0, __p1); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vget_lane_u16(__p0, __p1) __extension__ ({ \
   uint16x4_t __s0 = __p0; \
   uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
+  __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -6445,13 +6445,13 @@
   uint16x4_t __s0 = __p0; \
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \
+  __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vget_lane_u16(__p0, __p1) __extension__ ({ \
   uint16x4_t __s0 = __p0; \
   uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
+  __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -6483,7 +6483,7 @@
 #define vget_lane_f32(__p0, __p1) __extension__ ({ \
   float32x2_t __s0 = __p0; \
   float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vget_lane_f32((int8x8_t)__s0, __p1); \
+  __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -6491,13 +6491,13 @@
   float32x2_t __s0 = __p0; \
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vget_lane_f32((int8x8_t)__rev0, __p1); \
+  __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vget_lane_f32(__p0, __p1) __extension__ ({ \
   float32x2_t __s0 = __p0; \
   float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vget_lane_f32((int8x8_t)__s0, __p1); \
+  __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -6506,7 +6506,7 @@
 #define vget_lane_s32(__p0, __p1) __extension__ ({ \
   int32x2_t __s0 = __p0; \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
+  __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -6514,13 +6514,13 @@
   int32x2_t __s0 = __p0; \
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vget_lane_i32((int8x8_t)__rev0, __p1); \
+  __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vget_lane_s32(__p0, __p1) __extension__ ({ \
   int32x2_t __s0 = __p0; \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
+  __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -6528,14 +6528,14 @@
 #define vget_lane_s64(__p0, __p1) __extension__ ({ \
   int64x1_t __s0 = __p0; \
   int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
+  __ret = (int64_t) __builtin_neon_vget_lane_i64((int64x1_t)__s0, __p1); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vget_lane_s16(__p0, __p1) __extension__ ({ \
   int16x4_t __s0 = __p0; \
   int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
+  __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -6543,13 +6543,13 @@
   int16x4_t __s0 = __p0; \
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \
+  __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vget_lane_s16(__p0, __p1) __extension__ ({ \
   int16x4_t __s0 = __p0; \
   int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
+  __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -10034,7 +10034,7 @@
 #define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
   float32x4x2_t __s1 = __p1; \
   float32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 41); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 41); \
   __ret; \
 })
 #else
@@ -10044,7 +10044,7 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   float32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 41); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 41); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
@@ -10056,7 +10056,7 @@
 #define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
   int32x4x2_t __s1 = __p1; \
   int32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 34); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 34); \
   __ret; \
 })
 #else
@@ -10066,7 +10066,7 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   int32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 34); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 34); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
@@ -10078,7 +10078,7 @@
 #define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
   int16x8x2_t __s1 = __p1; \
   int16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 33); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 33); \
   __ret; \
 })
 #else
@@ -10088,7 +10088,7 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 33); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 33); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -10188,7 +10188,7 @@
 #define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
   float32x2x2_t __s1 = __p1; \
   float32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 9); \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 9); \
   __ret; \
 })
 #else
@@ -10198,7 +10198,7 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   float32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 9); \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 9); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
@@ -10210,7 +10210,7 @@
 #define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
   int32x2x2_t __s1 = __p1; \
   int32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 2); \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 2); \
   __ret; \
 })
 #else
@@ -10220,7 +10220,7 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   int32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 2); \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 2); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
@@ -10232,7 +10232,7 @@
 #define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
   int16x4x2_t __s1 = __p1; \
   int16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 1); \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 1); \
   __ret; \
 })
 #else
@@ -10242,7 +10242,7 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   int16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 1); \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 1); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
@@ -11078,7 +11078,7 @@
 #define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
   float32x4x3_t __s1 = __p1; \
   float32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 41); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 41); \
   __ret; \
 })
 #else
@@ -11089,7 +11089,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   float32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 41); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 41); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
@@ -11102,7 +11102,7 @@
 #define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
   int32x4x3_t __s1 = __p1; \
   int32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 34); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 34); \
   __ret; \
 })
 #else
@@ -11113,7 +11113,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   int32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 34); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 34); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
@@ -11126,7 +11126,7 @@
 #define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
   int16x8x3_t __s1 = __p1; \
   int16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 33); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 33); \
   __ret; \
 })
 #else
@@ -11137,7 +11137,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 33); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 33); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -11246,7 +11246,7 @@
 #define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
   float32x2x3_t __s1 = __p1; \
   float32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 9); \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 9); \
   __ret; \
 })
 #else
@@ -11257,7 +11257,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   float32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 9); \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 9); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
@@ -11270,7 +11270,7 @@
 #define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
   int32x2x3_t __s1 = __p1; \
   int32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 2); \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 2); \
   __ret; \
 })
 #else
@@ -11281,7 +11281,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   int32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 2); \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 2); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
@@ -11294,7 +11294,7 @@
 #define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
   int16x4x3_t __s1 = __p1; \
   int16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 1); \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 1); \
   __ret; \
 })
 #else
@@ -11305,7 +11305,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   int16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 1); \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 1); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
@@ -12190,7 +12190,7 @@
 #define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
   float32x4x4_t __s1 = __p1; \
   float32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 41); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 41); \
   __ret; \
 })
 #else
@@ -12202,7 +12202,7 @@
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
   float32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 41); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 41); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
@@ -12216,7 +12216,7 @@
 #define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
   int32x4x4_t __s1 = __p1; \
   int32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 34); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 34); \
   __ret; \
 })
 #else
@@ -12228,7 +12228,7 @@
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
   int32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 34); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 34); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
@@ -12242,7 +12242,7 @@
 #define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
   int16x8x4_t __s1 = __p1; \
   int16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 33); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 33); \
   __ret; \
 })
 #else
@@ -12254,7 +12254,7 @@
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 33); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 33); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -12372,7 +12372,7 @@
 #define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
   float32x2x4_t __s1 = __p1; \
   float32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 9); \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 9); \
   __ret; \
 })
 #else
@@ -12384,7 +12384,7 @@
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
   float32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 9); \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 9); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
@@ -12398,7 +12398,7 @@
 #define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
   int32x2x4_t __s1 = __p1; \
   int32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 2); \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 2); \
   __ret; \
 })
 #else
@@ -12410,7 +12410,7 @@
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
   int32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 2); \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 2); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
@@ -12424,7 +12424,7 @@
 #define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
   int16x4x4_t __s1 = __p1; \
   int16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 1); \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 1); \
   __ret; \
 })
 #else
@@ -12436,7 +12436,7 @@
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
   int16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 1); \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 1); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
@@ -15725,20 +15725,20 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
   uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint32x2_t) {__p1, __p1}, 51);
+  __ret = vmull_u32(__p0, (uint32x2_t) {__p1, __p1});
   return __ret;
 }
 #else
 __ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(uint32x2_t) {__p1, __p1}, 51);
+  __ret = __noswap_vmull_u32(__rev0, (uint32x2_t) {__p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
 }
 __ai uint64x2_t __noswap_vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
   uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint32x2_t) {__p1, __p1}, 51);
+  __ret = __noswap_vmull_u32(__p0, (uint32x2_t) {__p1, __p1});
   return __ret;
 }
 #endif
@@ -15746,20 +15746,20 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
   uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint16x4_t) {__p1, __p1, __p1, __p1}, 50);
+  __ret = vmull_u16(__p0, (uint16x4_t) {__p1, __p1, __p1, __p1});
   return __ret;
 }
 #else
 __ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(uint16x4_t) {__p1, __p1, __p1, __p1}, 50);
+  __ret = __noswap_vmull_u16(__rev0, (uint16x4_t) {__p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
 __ai uint32x4_t __noswap_vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
   uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint16x4_t) {__p1, __p1, __p1, __p1}, 50);
+  __ret = __noswap_vmull_u16(__p0, (uint16x4_t) {__p1, __p1, __p1, __p1});
   return __ret;
 }
 #endif
@@ -15767,20 +15767,20 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) {
   int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
+  __ret = vmull_s32(__p0, (int32x2_t) {__p1, __p1});
   return __ret;
 }
 #else
 __ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) {
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
+  __ret = __noswap_vmull_s32(__rev0, (int32x2_t) {__p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
 }
 __ai int64x2_t __noswap_vmull_n_s32(int32x2_t __p0, int32_t __p1) {
   int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
+  __ret = __noswap_vmull_s32(__p0, (int32x2_t) {__p1, __p1});
   return __ret;
 }
 #endif
@@ -15788,20 +15788,20 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) {
   int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
+  __ret = vmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
   return __ret;
 }
 #else
 __ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) {
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
+  __ret = __noswap_vmull_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
 __ai int32x4_t __noswap_vmull_n_s16(int16x4_t __p0, int16_t __p1) {
   int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
+  __ret = __noswap_vmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
   return __ret;
 }
 #endif
@@ -17862,7 +17862,7 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
   int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
+  __ret = vqdmlal_s32(__p0, __p1, (int32x2_t) {__p2, __p2});
   return __ret;
 }
 #else
@@ -17870,13 +17870,13 @@
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
+  __ret = __noswap_vqdmlal_s32(__rev0, __rev1, (int32x2_t) {__p2, __p2});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
 }
 __ai int64x2_t __noswap_vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
   int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
+  __ret = __noswap_vqdmlal_s32(__p0, __p1, (int32x2_t) {__p2, __p2});
   return __ret;
 }
 #endif
@@ -17884,7 +17884,7 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
   int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
+  __ret = vqdmlal_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2});
   return __ret;
 }
 #else
@@ -17892,13 +17892,13 @@
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
+  __ret = __noswap_vqdmlal_s16(__rev0, __rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
 __ai int32x4_t __noswap_vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
   int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
+  __ret = __noswap_vqdmlal_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2});
   return __ret;
 }
 #endif
@@ -18000,7 +18000,7 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
   int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
+  __ret = vqdmlsl_s32(__p0, __p1, (int32x2_t) {__p2, __p2});
   return __ret;
 }
 #else
@@ -18008,13 +18008,13 @@
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
+  __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, (int32x2_t) {__p2, __p2});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
 }
 __ai int64x2_t __noswap_vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
   int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
+  __ret = __noswap_vqdmlsl_s32(__p0, __p1, (int32x2_t) {__p2, __p2});
   return __ret;
 }
 #endif
@@ -18022,7 +18022,7 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
   int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
+  __ret = vqdmlsl_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2});
   return __ret;
 }
 #else
@@ -18030,13 +18030,13 @@
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
+  __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
 __ai int32x4_t __noswap_vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
   int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
+  __ret = __noswap_vqdmlsl_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2});
   return __ret;
 }
 #endif
@@ -18216,14 +18216,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
   int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
+  __ret = vqdmulhq_s32(__p0, (int32x4_t) {__p1, __p1, __p1, __p1});
   return __ret;
 }
 #else
 __ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
+  __ret = __noswap_vqdmulhq_s32(__rev0, (int32x4_t) {__p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
@@ -18232,14 +18232,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
   int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
+  __ret = vqdmulhq_s16(__p0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1});
   return __ret;
 }
 #else
 __ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
+  __ret = __noswap_vqdmulhq_s16(__rev0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
 }
@@ -18248,14 +18248,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
   int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
+  __ret = vqdmulh_s32(__p0, (int32x2_t) {__p1, __p1});
   return __ret;
 }
 #else
 __ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
+  __ret = __noswap_vqdmulh_s32(__rev0, (int32x2_t) {__p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
 }
@@ -18264,14 +18264,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
   int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
+  __ret = vqdmulh_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
   return __ret;
 }
 #else
 __ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
+  __ret = __noswap_vqdmulh_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
@@ -18366,20 +18366,20 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
   int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
+  __ret = vqdmull_s32(__p0, (int32x2_t) {__p1, __p1});
   return __ret;
 }
 #else
 __ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
+  __ret = __noswap_vqdmull_s32(__rev0, (int32x2_t) {__p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
 }
 __ai int64x2_t __noswap_vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
   int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
+  __ret = __noswap_vqdmull_s32(__p0, (int32x2_t) {__p1, __p1});
   return __ret;
 }
 #endif
@@ -18387,20 +18387,20 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
   int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
+  __ret = vqdmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
   return __ret;
 }
 #else
 __ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
+  __ret = __noswap_vqdmull_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
 __ai int32x4_t __noswap_vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
   int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
+  __ret = __noswap_vqdmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
   return __ret;
 }
 #endif
@@ -18865,14 +18865,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
   int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
+  __ret = vqrdmulhq_s32(__p0, (int32x4_t) {__p1, __p1, __p1, __p1});
   return __ret;
 }
 #else
 __ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
+  __ret = __noswap_vqrdmulhq_s32(__rev0, (int32x4_t) {__p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
@@ -18881,14 +18881,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
   int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
+  __ret = vqrdmulhq_s16(__p0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1});
   return __ret;
 }
 #else
 __ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
+  __ret = __noswap_vqrdmulhq_s16(__rev0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
   return __ret;
 }
@@ -18897,14 +18897,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
   int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
+  __ret = vqrdmulh_s32(__p0, (int32x2_t) {__p1, __p1});
   return __ret;
 }
 #else
 __ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
+  __ret = __noswap_vqrdmulh_s32(__rev0, (int32x2_t) {__p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
   return __ret;
 }
@@ -18913,14 +18913,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
   int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
+  __ret = vqrdmulh_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
   return __ret;
 }
 #else
 __ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
+  __ret = __noswap_vqrdmulh_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
@@ -22727,7 +22727,7 @@
   poly8_t __s0 = __p0; \
   poly8x8_t __s1 = __p1; \
   poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -22736,7 +22736,7 @@
   poly8x8_t __s1 = __p1; \
   poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
+  __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
@@ -22744,7 +22744,7 @@
   poly8_t __s0 = __p0; \
   poly8x8_t __s1 = __p1; \
   poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -22754,7 +22754,7 @@
   poly16_t __s0 = __p0; \
   poly16x4_t __s1 = __p1; \
   poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -22763,7 +22763,7 @@
   poly16x4_t __s1 = __p1; \
   poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
   poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \
+  __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
@@ -22771,7 +22771,7 @@
   poly16_t __s0 = __p0; \
   poly16x4_t __s1 = __p1; \
   poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -22781,7 +22781,7 @@
   poly8_t __s0 = __p0; \
   poly8x16_t __s1 = __p1; \
   poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -22790,7 +22790,7 @@
   poly8x16_t __s1 = __p1; \
   poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
+  __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
@@ -22798,7 +22798,7 @@
   poly8_t __s0 = __p0; \
   poly8x16_t __s1 = __p1; \
   poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -22808,7 +22808,7 @@
   poly16_t __s0 = __p0; \
   poly16x8_t __s1 = __p1; \
   poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -22817,7 +22817,7 @@
   poly16x8_t __s1 = __p1; \
   poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \
+  __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
@@ -22825,7 +22825,7 @@
   poly16_t __s0 = __p0; \
   poly16x8_t __s1 = __p1; \
   poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -22862,7 +22862,7 @@
   uint32_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
   uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -22871,7 +22871,7 @@
   uint32x4_t __s1 = __p1; \
   uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
   uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__rev1, __p2); \
+  __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
@@ -22879,7 +22879,7 @@
   uint32_t __s0 = __p0; \
   uint32x4_t __s1 = __p1; \
   uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -22889,7 +22889,7 @@
   uint64_t __s0 = __p0; \
   uint64x2_t __s1 = __p1; \
   uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -22898,7 +22898,7 @@
   uint64x2_t __s1 = __p1; \
   uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
   uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__rev1, __p2); \
+  __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
@@ -22906,7 +22906,7 @@
   uint64_t __s0 = __p0; \
   uint64x2_t __s1 = __p1; \
   uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -22916,7 +22916,7 @@
   uint16_t __s0 = __p0; \
   uint16x8_t __s1 = __p1; \
   uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -22925,7 +22925,7 @@
   uint16x8_t __s1 = __p1; \
   uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \
+  __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
@@ -22933,7 +22933,7 @@
   uint16_t __s0 = __p0; \
   uint16x8_t __s1 = __p1; \
   uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -22970,7 +22970,7 @@
   float32_t __s0 = __p0; \
   float32x4_t __s1 = __p1; \
   float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -22979,7 +22979,7 @@
   float32x4_t __s1 = __p1; \
   float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
   float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (int8x16_t)__rev1, __p2); \
+  __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
@@ -22987,7 +22987,7 @@
   float32_t __s0 = __p0; \
   float32x4_t __s1 = __p1; \
   float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -22997,7 +22997,7 @@
   int32_t __s0 = __p0; \
   int32x4_t __s1 = __p1; \
   int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -23006,7 +23006,7 @@
   int32x4_t __s1 = __p1; \
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
   int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__rev1, __p2); \
+  __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
@@ -23014,7 +23014,7 @@
   int32_t __s0 = __p0; \
   int32x4_t __s1 = __p1; \
   int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -23024,7 +23024,7 @@
   int64_t __s0 = __p0; \
   int64x2_t __s1 = __p1; \
   int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -23033,7 +23033,7 @@
   int64x2_t __s1 = __p1; \
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
   int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__rev1, __p2); \
+  __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
@@ -23041,7 +23041,7 @@
   int64_t __s0 = __p0; \
   int64x2_t __s1 = __p1; \
   int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -23051,7 +23051,7 @@
   int16_t __s0 = __p0; \
   int16x8_t __s1 = __p1; \
   int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -23060,7 +23060,7 @@
   int16x8_t __s1 = __p1; \
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
   int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \
+  __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret; \
 })
@@ -23068,7 +23068,7 @@
   int16_t __s0 = __p0; \
   int16x8_t __s1 = __p1; \
   int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -23105,7 +23105,7 @@
   uint32_t __s0 = __p0; \
   uint32x2_t __s1 = __p1; \
   uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -23114,7 +23114,7 @@
   uint32x2_t __s1 = __p1; \
   uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
   uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__rev1, __p2); \
+  __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
@@ -23122,7 +23122,7 @@
   uint32_t __s0 = __p0; \
   uint32x2_t __s1 = __p1; \
   uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -23131,7 +23131,7 @@
   uint64_t __s0 = __p0; \
   uint64x1_t __s1 = __p1; \
   uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int64x1_t)__s1, __p2); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
@@ -23139,7 +23139,7 @@
   uint16_t __s0 = __p0; \
   uint16x4_t __s1 = __p1; \
   uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -23148,7 +23148,7 @@
   uint16x4_t __s1 = __p1; \
   uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
   uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \
+  __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
@@ -23156,7 +23156,7 @@
   uint16_t __s0 = __p0; \
   uint16x4_t __s1 = __p1; \
   uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -23193,7 +23193,7 @@
   float32_t __s0 = __p0; \
   float32x2_t __s1 = __p1; \
   float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -23202,7 +23202,7 @@
   float32x2_t __s1 = __p1; \
   float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
   float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (int8x8_t)__rev1, __p2); \
+  __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
@@ -23210,7 +23210,7 @@
   float32_t __s0 = __p0; \
   float32x2_t __s1 = __p1; \
   float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -23220,7 +23220,7 @@
   int32_t __s0 = __p0; \
   int32x2_t __s1 = __p1; \
   int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -23229,7 +23229,7 @@
   int32x2_t __s1 = __p1; \
   int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
   int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__rev1, __p2); \
+  __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
@@ -23237,7 +23237,7 @@
   int32_t __s0 = __p0; \
   int32x2_t __s1 = __p1; \
   int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -23246,7 +23246,7 @@
   int64_t __s0 = __p0; \
   int64x1_t __s1 = __p1; \
   int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int64x1_t)__s1, __p2); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
@@ -23254,7 +23254,7 @@
   int16_t __s0 = __p0; \
   int16x4_t __s1 = __p1; \
   int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -23263,7 +23263,7 @@
   int16x4_t __s1 = __p1; \
   int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
   int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \
+  __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
   __ret; \
 })
@@ -23271,7 +23271,7 @@
   int16_t __s0 = __p0; \
   int16x4_t __s1 = __p1; \
   int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -26106,7 +26106,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_f32_x2(__p0, __p1) __extension__ ({ \
   float32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 41); \
+  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 41); \
 })
 #else
 #define vst1q_f32_x2(__p0, __p1) __extension__ ({ \
@@ -26114,14 +26114,14 @@
   float32x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 41); \
+  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 41); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_s32_x2(__p0, __p1) __extension__ ({ \
   int32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 34); \
+  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 34); \
 })
 #else
 #define vst1q_s32_x2(__p0, __p1) __extension__ ({ \
@@ -26129,14 +26129,14 @@
   int32x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 34); \
+  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 34); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_s64_x2(__p0, __p1) __extension__ ({ \
   int64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 35); \
+  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 35); \
 })
 #else
 #define vst1q_s64_x2(__p0, __p1) __extension__ ({ \
@@ -26144,14 +26144,14 @@
   int64x2x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 35); \
+  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 35); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_s16_x2(__p0, __p1) __extension__ ({ \
   int16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 33); \
+  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 33); \
 })
 #else
 #define vst1q_s16_x2(__p0, __p1) __extension__ ({ \
@@ -26159,7 +26159,7 @@
   int16x8x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 33); \
+  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 33); \
 })
 #endif
 
@@ -26230,7 +26230,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst1_f32_x2(__p0, __p1) __extension__ ({ \
   float32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 9); \
+  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 9); \
 })
 #else
 #define vst1_f32_x2(__p0, __p1) __extension__ ({ \
@@ -26238,14 +26238,14 @@
   float32x2x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 9); \
+  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 9); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst1_s32_x2(__p0, __p1) __extension__ ({ \
   int32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 2); \
+  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 2); \
 })
 #else
 #define vst1_s32_x2(__p0, __p1) __extension__ ({ \
@@ -26253,18 +26253,18 @@
   int32x2x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 2); \
+  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 2); \
 })
 #endif
 
 #define vst1_s64_x2(__p0, __p1) __extension__ ({ \
   int64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 3); \
+  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 3); \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vst1_s16_x2(__p0, __p1) __extension__ ({ \
   int16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 1); \
+  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 1); \
 })
 #else
 #define vst1_s16_x2(__p0, __p1) __extension__ ({ \
@@ -26272,7 +26272,7 @@
   int16x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 1); \
+  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 1); \
 })
 #endif
 
@@ -26423,7 +26423,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_f32_x3(__p0, __p1) __extension__ ({ \
   float32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 41); \
+  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 41); \
 })
 #else
 #define vst1q_f32_x3(__p0, __p1) __extension__ ({ \
@@ -26432,14 +26432,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 41); \
+  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 41); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_s32_x3(__p0, __p1) __extension__ ({ \
   int32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 34); \
+  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 34); \
 })
 #else
 #define vst1q_s32_x3(__p0, __p1) __extension__ ({ \
@@ -26448,14 +26448,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 34); \
+  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 34); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_s64_x3(__p0, __p1) __extension__ ({ \
   int64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 35); \
+  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 35); \
 })
 #else
 #define vst1q_s64_x3(__p0, __p1) __extension__ ({ \
@@ -26464,14 +26464,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 35); \
+  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 35); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_s16_x3(__p0, __p1) __extension__ ({ \
   int16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 33); \
+  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 33); \
 })
 #else
 #define vst1q_s16_x3(__p0, __p1) __extension__ ({ \
@@ -26480,7 +26480,7 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 33); \
+  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 33); \
 })
 #endif
 
@@ -26555,7 +26555,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst1_f32_x3(__p0, __p1) __extension__ ({ \
   float32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 9); \
+  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 9); \
 })
 #else
 #define vst1_f32_x3(__p0, __p1) __extension__ ({ \
@@ -26564,14 +26564,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 9); \
+  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 9); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst1_s32_x3(__p0, __p1) __extension__ ({ \
   int32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 2); \
+  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 2); \
 })
 #else
 #define vst1_s32_x3(__p0, __p1) __extension__ ({ \
@@ -26580,18 +26580,18 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 2); \
+  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 2); \
 })
 #endif
 
 #define vst1_s64_x3(__p0, __p1) __extension__ ({ \
   int64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \
+  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 3); \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vst1_s16_x3(__p0, __p1) __extension__ ({ \
   int16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 1); \
+  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 1); \
 })
 #else
 #define vst1_s16_x3(__p0, __p1) __extension__ ({ \
@@ -26600,7 +26600,7 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 1); \
+  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 1); \
 })
 #endif
 
@@ -26760,7 +26760,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_f32_x4(__p0, __p1) __extension__ ({ \
   float32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 41); \
+  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 41); \
 })
 #else
 #define vst1q_f32_x4(__p0, __p1) __extension__ ({ \
@@ -26770,14 +26770,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 41); \
+  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 41); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_s32_x4(__p0, __p1) __extension__ ({ \
   int32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 34); \
+  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 34); \
 })
 #else
 #define vst1q_s32_x4(__p0, __p1) __extension__ ({ \
@@ -26787,14 +26787,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 34); \
+  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 34); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_s64_x4(__p0, __p1) __extension__ ({ \
   int64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 35); \
+  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 35); \
 })
 #else
 #define vst1q_s64_x4(__p0, __p1) __extension__ ({ \
@@ -26804,14 +26804,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 35); \
+  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 35); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_s16_x4(__p0, __p1) __extension__ ({ \
   int16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 33); \
+  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 33); \
 })
 #else
 #define vst1q_s16_x4(__p0, __p1) __extension__ ({ \
@@ -26821,7 +26821,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 33); \
+  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 33); \
 })
 #endif
 
@@ -26900,7 +26900,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst1_f32_x4(__p0, __p1) __extension__ ({ \
   float32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 9); \
+  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 9); \
 })
 #else
 #define vst1_f32_x4(__p0, __p1) __extension__ ({ \
@@ -26910,14 +26910,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 9); \
+  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 9); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst1_s32_x4(__p0, __p1) __extension__ ({ \
   int32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 2); \
+  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 2); \
 })
 #else
 #define vst1_s32_x4(__p0, __p1) __extension__ ({ \
@@ -26927,18 +26927,18 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 2); \
+  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 2); \
 })
 #endif
 
 #define vst1_s64_x4(__p0, __p1) __extension__ ({ \
   int64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \
+  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 3); \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vst1_s16_x4(__p0, __p1) __extension__ ({ \
   int16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 1); \
+  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 1); \
 })
 #else
 #define vst1_s16_x4(__p0, __p1) __extension__ ({ \
@@ -26948,7 +26948,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 1); \
+  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 1); \
 })
 #endif
 
@@ -27075,7 +27075,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst2q_f32(__p0, __p1) __extension__ ({ \
   float32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 41); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 41); \
 })
 #else
 #define vst2q_f32(__p0, __p1) __extension__ ({ \
@@ -27083,14 +27083,14 @@
   float32x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 41); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 41); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst2q_s32(__p0, __p1) __extension__ ({ \
   int32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 34); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 34); \
 })
 #else
 #define vst2q_s32(__p0, __p1) __extension__ ({ \
@@ -27098,14 +27098,14 @@
   int32x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 34); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 34); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst2q_s16(__p0, __p1) __extension__ ({ \
   int16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 33); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 33); \
 })
 #else
 #define vst2q_s16(__p0, __p1) __extension__ ({ \
@@ -27113,7 +27113,7 @@
   int16x8x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 33); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 33); \
 })
 #endif
 
@@ -27184,7 +27184,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst2_f32(__p0, __p1) __extension__ ({ \
   float32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 9); \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 9); \
 })
 #else
 #define vst2_f32(__p0, __p1) __extension__ ({ \
@@ -27192,14 +27192,14 @@
   float32x2x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 9); \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 9); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst2_s32(__p0, __p1) __extension__ ({ \
   int32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 2); \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 2); \
 })
 #else
 #define vst2_s32(__p0, __p1) __extension__ ({ \
@@ -27207,18 +27207,18 @@
   int32x2x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 2); \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 2); \
 })
 #endif
 
 #define vst2_s64(__p0, __p1) __extension__ ({ \
   int64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 3); \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 3); \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vst2_s16(__p0, __p1) __extension__ ({ \
   int16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 1); \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 1); \
 })
 #else
 #define vst2_s16(__p0, __p1) __extension__ ({ \
@@ -27226,7 +27226,7 @@
   int16x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 1); \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 1); \
 })
 #endif
 
@@ -27308,7 +27308,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
   float32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 41); \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 41); \
 })
 #else
 #define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
@@ -27316,14 +27316,14 @@
   float32x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 41); \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 41); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
   int32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 34); \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 34); \
 })
 #else
 #define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
@@ -27331,14 +27331,14 @@
   int32x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 34); \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 34); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
   int16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 33); \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 33); \
 })
 #else
 #define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
@@ -27346,7 +27346,7 @@
   int16x8x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 33); \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 33); \
 })
 #endif
 
@@ -27413,7 +27413,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
   float32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 9); \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 9); \
 })
 #else
 #define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
@@ -27421,14 +27421,14 @@
   float32x2x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 9); \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 9); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
   int32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 2); \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 2); \
 })
 #else
 #define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
@@ -27436,14 +27436,14 @@
   int32x2x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 2); \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 2); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
   int16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 1); \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 1); \
 })
 #else
 #define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
@@ -27451,7 +27451,7 @@
   int16x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 1); \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 1); \
 })
 #endif
 
@@ -27586,7 +27586,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst3q_f32(__p0, __p1) __extension__ ({ \
   float32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 41); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 41); \
 })
 #else
 #define vst3q_f32(__p0, __p1) __extension__ ({ \
@@ -27595,14 +27595,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 41); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 41); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst3q_s32(__p0, __p1) __extension__ ({ \
   int32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 34); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 34); \
 })
 #else
 #define vst3q_s32(__p0, __p1) __extension__ ({ \
@@ -27611,14 +27611,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 34); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 34); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst3q_s16(__p0, __p1) __extension__ ({ \
   int16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 33); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 33); \
 })
 #else
 #define vst3q_s16(__p0, __p1) __extension__ ({ \
@@ -27627,7 +27627,7 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 33); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 33); \
 })
 #endif
 
@@ -27702,7 +27702,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst3_f32(__p0, __p1) __extension__ ({ \
   float32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 9); \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 9); \
 })
 #else
 #define vst3_f32(__p0, __p1) __extension__ ({ \
@@ -27711,14 +27711,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 9); \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 9); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst3_s32(__p0, __p1) __extension__ ({ \
   int32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 2); \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 2); \
 })
 #else
 #define vst3_s32(__p0, __p1) __extension__ ({ \
@@ -27727,18 +27727,18 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 2); \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 2); \
 })
 #endif
 
 #define vst3_s64(__p0, __p1) __extension__ ({ \
   int64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 3); \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vst3_s16(__p0, __p1) __extension__ ({ \
   int16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 1); \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 1); \
 })
 #else
 #define vst3_s16(__p0, __p1) __extension__ ({ \
@@ -27747,7 +27747,7 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 1); \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 1); \
 })
 #endif
 
@@ -27834,7 +27834,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
   float32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 41); \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 41); \
 })
 #else
 #define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
@@ -27843,14 +27843,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 41); \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 41); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
   int32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 34); \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 34); \
 })
 #else
 #define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
@@ -27859,14 +27859,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 34); \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 34); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
   int16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 33); \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 33); \
 })
 #else
 #define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
@@ -27875,7 +27875,7 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 33); \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 33); \
 })
 #endif
 
@@ -27946,7 +27946,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
   float32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 9); \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 9); \
 })
 #else
 #define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
@@ -27955,14 +27955,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 9); \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 9); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
   int32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 2); \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 2); \
 })
 #else
 #define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
@@ -27971,14 +27971,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 2); \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 2); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
   int16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 1); \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 1); \
 })
 #else
 #define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
@@ -27987,7 +27987,7 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 1); \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 1); \
 })
 #endif
 
@@ -28130,7 +28130,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst4q_f32(__p0, __p1) __extension__ ({ \
   float32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 41); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 41); \
 })
 #else
 #define vst4q_f32(__p0, __p1) __extension__ ({ \
@@ -28140,14 +28140,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 41); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 41); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst4q_s32(__p0, __p1) __extension__ ({ \
   int32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 34); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 34); \
 })
 #else
 #define vst4q_s32(__p0, __p1) __extension__ ({ \
@@ -28157,14 +28157,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 34); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 34); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst4q_s16(__p0, __p1) __extension__ ({ \
   int16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 33); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 33); \
 })
 #else
 #define vst4q_s16(__p0, __p1) __extension__ ({ \
@@ -28174,7 +28174,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 33); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 33); \
 })
 #endif
 
@@ -28253,7 +28253,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst4_f32(__p0, __p1) __extension__ ({ \
   float32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 9); \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 9); \
 })
 #else
 #define vst4_f32(__p0, __p1) __extension__ ({ \
@@ -28263,14 +28263,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 9); \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 9); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst4_s32(__p0, __p1) __extension__ ({ \
   int32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 2); \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 2); \
 })
 #else
 #define vst4_s32(__p0, __p1) __extension__ ({ \
@@ -28280,18 +28280,18 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 2); \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 2); \
 })
 #endif
 
 #define vst4_s64(__p0, __p1) __extension__ ({ \
   int64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 3); \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vst4_s16(__p0, __p1) __extension__ ({ \
   int16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 1); \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 1); \
 })
 #else
 #define vst4_s16(__p0, __p1) __extension__ ({ \
@@ -28301,7 +28301,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 1); \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 1); \
 })
 #endif
 
@@ -28393,7 +28393,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
   float32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 41); \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 41); \
 })
 #else
 #define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
@@ -28403,14 +28403,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 41); \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 41); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
   int32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 34); \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 34); \
 })
 #else
 #define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
@@ -28420,14 +28420,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 34); \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 34); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
   int16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 33); \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 33); \
 })
 #else
 #define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
@@ -28437,7 +28437,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 33); \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 33); \
 })
 #endif
 
@@ -28512,7 +28512,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
   float32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 9); \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 9); \
 })
 #else
 #define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
@@ -28522,14 +28522,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 9); \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 9); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
   int32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 2); \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 2); \
 })
 #else
 #define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
@@ -28539,14 +28539,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 2); \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 2); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
   int16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 1); \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 1); \
 })
 #else
 #define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
@@ -28556,7 +28556,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 1); \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 1); \
 })
 #endif
 
@@ -32695,7 +32695,7 @@
 #define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
   float16x8x2_t __s1 = __p1; \
   float16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 40); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 40); \
   __ret; \
 })
 #else
@@ -32705,7 +32705,7 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   float16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 40); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 40); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -32717,7 +32717,7 @@
 #define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
   float16x4x2_t __s1 = __p1; \
   float16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 8); \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 8); \
   __ret; \
 })
 #else
@@ -32727,7 +32727,7 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   float16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 8); \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 8); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
@@ -32811,7 +32811,7 @@
 #define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
   float16x8x3_t __s1 = __p1; \
   float16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 40); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 40); \
   __ret; \
 })
 #else
@@ -32822,7 +32822,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
   float16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 40); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 40); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -32835,7 +32835,7 @@
 #define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
   float16x4x3_t __s1 = __p1; \
   float16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 8); \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 8); \
   __ret; \
 })
 #else
@@ -32846,7 +32846,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   float16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 8); \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 8); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
@@ -32935,7 +32935,7 @@
 #define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
   float16x8x4_t __s1 = __p1; \
   float16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 40); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 40); \
   __ret; \
 })
 #else
@@ -32947,7 +32947,7 @@
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
   float16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 40); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 40); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
@@ -32961,7 +32961,7 @@
 #define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
   float16x4x4_t __s1 = __p1; \
   float16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 8); \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 8); \
   __ret; \
 })
 #else
@@ -32973,7 +32973,7 @@
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
   float16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 8); \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 8); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
@@ -33038,7 +33038,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_f16_x2(__p0, __p1) __extension__ ({ \
   float16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 40); \
+  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 40); \
 })
 #else
 #define vst1q_f16_x2(__p0, __p1) __extension__ ({ \
@@ -33046,14 +33046,14 @@
   float16x8x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 40); \
+  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 40); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst1_f16_x2(__p0, __p1) __extension__ ({ \
   float16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 8); \
+  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 8); \
 })
 #else
 #define vst1_f16_x2(__p0, __p1) __extension__ ({ \
@@ -33061,14 +33061,14 @@
   float16x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 8); \
+  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 8); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_f16_x3(__p0, __p1) __extension__ ({ \
   float16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 40); \
+  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 40); \
 })
 #else
 #define vst1q_f16_x3(__p0, __p1) __extension__ ({ \
@@ -33077,14 +33077,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 40); \
+  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 40); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst1_f16_x3(__p0, __p1) __extension__ ({ \
   float16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 8); \
+  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 8); \
 })
 #else
 #define vst1_f16_x3(__p0, __p1) __extension__ ({ \
@@ -33093,14 +33093,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 8); \
+  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 8); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_f16_x4(__p0, __p1) __extension__ ({ \
   float16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 40); \
+  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 40); \
 })
 #else
 #define vst1q_f16_x4(__p0, __p1) __extension__ ({ \
@@ -33110,14 +33110,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 40); \
+  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 40); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst1_f16_x4(__p0, __p1) __extension__ ({ \
   float16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 8); \
+  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 8); \
 })
 #else
 #define vst1_f16_x4(__p0, __p1) __extension__ ({ \
@@ -33127,14 +33127,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 8); \
+  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 8); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst2q_f16(__p0, __p1) __extension__ ({ \
   float16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 40); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 40); \
 })
 #else
 #define vst2q_f16(__p0, __p1) __extension__ ({ \
@@ -33142,14 +33142,14 @@
   float16x8x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 40); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 40); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst2_f16(__p0, __p1) __extension__ ({ \
   float16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 8); \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 8); \
 })
 #else
 #define vst2_f16(__p0, __p1) __extension__ ({ \
@@ -33157,14 +33157,14 @@
   float16x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 8); \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 8); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
   float16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 40); \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 40); \
 })
 #else
 #define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
@@ -33172,14 +33172,14 @@
   float16x8x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 40); \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 40); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
   float16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 8); \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 8); \
 })
 #else
 #define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
@@ -33187,14 +33187,14 @@
   float16x4x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 8); \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 8); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst3q_f16(__p0, __p1) __extension__ ({ \
   float16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 40); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 40); \
 })
 #else
 #define vst3q_f16(__p0, __p1) __extension__ ({ \
@@ -33203,14 +33203,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 40); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 40); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst3_f16(__p0, __p1) __extension__ ({ \
   float16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 8); \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 8); \
 })
 #else
 #define vst3_f16(__p0, __p1) __extension__ ({ \
@@ -33219,14 +33219,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 8); \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 8); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
   float16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 40); \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 40); \
 })
 #else
 #define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
@@ -33235,14 +33235,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 40); \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 40); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
   float16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 8); \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 8); \
 })
 #else
 #define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
@@ -33251,14 +33251,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 8); \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 8); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst4q_f16(__p0, __p1) __extension__ ({ \
   float16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 40); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 40); \
 })
 #else
 #define vst4q_f16(__p0, __p1) __extension__ ({ \
@@ -33268,14 +33268,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 40); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 40); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst4_f16(__p0, __p1) __extension__ ({ \
   float16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 8); \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 8); \
 })
 #else
 #define vst4_f16(__p0, __p1) __extension__ ({ \
@@ -33285,14 +33285,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 8); \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 8); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
   float16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 40); \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 40); \
 })
 #else
 #define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
@@ -33302,14 +33302,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 40); \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 40); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
   float16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 8); \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 8); \
 })
 #else
 #define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
@@ -33319,7 +33319,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 8); \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 8); \
 })
 #endif
 
@@ -33652,7 +33652,7 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
   uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32((int8x16_t)__p0, __p1, (int8x16_t)__p2);
+  __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32(__p0, __p1, __p2);
   return __ret;
 }
 #else
@@ -33660,7 +33660,7 @@
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
   uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32((int8x16_t)__rev0, __p1, (int8x16_t)__rev2);
+  __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32(__rev0, __p1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
@@ -33674,7 +33674,7 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
   uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32((int8x16_t)__p0, __p1, (int8x16_t)__p2);
+  __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32(__p0, __p1, __p2);
   return __ret;
 }
 #else
@@ -33682,7 +33682,7 @@
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
   uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32((int8x16_t)__rev0, __p1, (int8x16_t)__rev2);
+  __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32(__rev0, __p1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
@@ -33691,7 +33691,7 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
   uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32((int8x16_t)__p0, __p1, (int8x16_t)__p2);
+  __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32(__p0, __p1, __p2);
   return __ret;
 }
 #else
@@ -33699,7 +33699,7 @@
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
   uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32((int8x16_t)__rev0, __p1, (int8x16_t)__rev2);
+  __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32(__rev0, __p1, __rev2);
   __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
   return __ret;
 }
@@ -39200,7 +39200,7 @@
 #define vduph_lane_f16(__p0, __p1) __extension__ ({ \
   float16x4_t __s0 = __p0; \
   float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_lane_f16((int8x8_t)__s0, __p1); \
+  __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -39208,7 +39208,7 @@
   float16x4_t __s0 = __p0; \
   float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_lane_f16((int8x8_t)__rev0, __p1); \
+  __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -39217,7 +39217,7 @@
 #define vduph_laneq_f16(__p0, __p1) __extension__ ({ \
   float16x8_t __s0 = __p0; \
   float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_laneq_f16((int8x16_t)__s0, __p1); \
+  __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -39225,7 +39225,7 @@
   float16x8_t __s0 = __p0; \
   float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_laneq_f16((int8x16_t)__rev0, __p1); \
+  __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -39236,7 +39236,7 @@
   float16_t __s1 = __p1; \
   float16x4_t __s2 = __p2; \
   float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (int8x8_t)__s2, __p3); \
+  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \
   __ret; \
 })
 #else
@@ -39246,7 +39246,7 @@
   float16x4_t __s2 = __p2; \
   float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
   float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (int8x8_t)__rev2, __p3); \
+  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__rev2, __p3); \
   __ret; \
 })
 #define __noswap_vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
@@ -39254,7 +39254,7 @@
   float16_t __s1 = __p1; \
   float16x4_t __s2 = __p2; \
   float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (int8x8_t)__s2, __p3); \
+  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \
   __ret; \
 })
 #endif
@@ -39329,7 +39329,7 @@
   float16_t __s1 = __p1; \
   float16x8_t __s2 = __p2; \
   float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (int8x16_t)__s2, __p3); \
+  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \
   __ret; \
 })
 #else
@@ -39339,7 +39339,7 @@
   float16x8_t __s2 = __p2; \
   float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
   float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (int8x16_t)__rev2, __p3); \
+  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__rev2, __p3); \
   __ret; \
 })
 #define __noswap_vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
@@ -39347,7 +39347,7 @@
   float16_t __s1 = __p1; \
   float16x8_t __s2 = __p2; \
   float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (int8x16_t)__s2, __p3); \
+  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \
   __ret; \
 })
 #endif
@@ -39873,7 +39873,7 @@
   float16_t __s0 = __p0; \
   float16x4_t __s1 = __p1; \
   float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -39882,7 +39882,7 @@
   float16x4_t __s1 = __p1; \
   float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
   float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (int8x8_t)__rev1, __p2); \
+  __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__rev1, __p2); \
   __ret; \
 })
 #endif
@@ -39934,7 +39934,7 @@
   float16_t __s0 = __p0; \
   float16x8_t __s1 = __p1; \
   float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -39943,7 +39943,7 @@
   float16x8_t __s1 = __p1; \
   float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
   float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (int8x16_t)__rev1, __p2); \
+  __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__rev1, __p2); \
   __ret; \
 })
 #endif
@@ -41173,14 +41173,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint16_t vaddlvq_u8(uint8x16_t __p0) {
   uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlvq_u8((int8x16_t)__p0);
+  __ret = (uint16_t) __builtin_neon_vaddlvq_u8(__p0);
   return __ret;
 }
 #else
 __ai uint16_t vaddlvq_u8(uint8x16_t __p0) {
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlvq_u8((int8x16_t)__rev0);
+  __ret = (uint16_t) __builtin_neon_vaddlvq_u8(__rev0);
   return __ret;
 }
 #endif
@@ -41188,14 +41188,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint64_t vaddlvq_u32(uint32x4_t __p0) {
   uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlvq_u32((int8x16_t)__p0);
+  __ret = (uint64_t) __builtin_neon_vaddlvq_u32(__p0);
   return __ret;
 }
 #else
 __ai uint64_t vaddlvq_u32(uint32x4_t __p0) {
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlvq_u32((int8x16_t)__rev0);
+  __ret = (uint64_t) __builtin_neon_vaddlvq_u32(__rev0);
   return __ret;
 }
 #endif
@@ -41203,14 +41203,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint32_t vaddlvq_u16(uint16x8_t __p0) {
   uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlvq_u16((int8x16_t)__p0);
+  __ret = (uint32_t) __builtin_neon_vaddlvq_u16(__p0);
   return __ret;
 }
 #else
 __ai uint32_t vaddlvq_u16(uint16x8_t __p0) {
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlvq_u16((int8x16_t)__rev0);
+  __ret = (uint32_t) __builtin_neon_vaddlvq_u16(__rev0);
   return __ret;
 }
 #endif
@@ -41218,14 +41218,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int16_t vaddlvq_s8(int8x16_t __p0) {
   int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlvq_s8((int8x16_t)__p0);
+  __ret = (int16_t) __builtin_neon_vaddlvq_s8(__p0);
   return __ret;
 }
 #else
 __ai int16_t vaddlvq_s8(int8x16_t __p0) {
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlvq_s8((int8x16_t)__rev0);
+  __ret = (int16_t) __builtin_neon_vaddlvq_s8(__rev0);
   return __ret;
 }
 #endif
@@ -41233,14 +41233,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int64_t vaddlvq_s32(int32x4_t __p0) {
   int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlvq_s32((int8x16_t)__p0);
+  __ret = (int64_t) __builtin_neon_vaddlvq_s32(__p0);
   return __ret;
 }
 #else
 __ai int64_t vaddlvq_s32(int32x4_t __p0) {
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlvq_s32((int8x16_t)__rev0);
+  __ret = (int64_t) __builtin_neon_vaddlvq_s32(__rev0);
   return __ret;
 }
 #endif
@@ -41248,14 +41248,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32_t vaddlvq_s16(int16x8_t __p0) {
   int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlvq_s16((int8x16_t)__p0);
+  __ret = (int32_t) __builtin_neon_vaddlvq_s16(__p0);
   return __ret;
 }
 #else
 __ai int32_t vaddlvq_s16(int16x8_t __p0) {
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlvq_s16((int8x16_t)__rev0);
+  __ret = (int32_t) __builtin_neon_vaddlvq_s16(__rev0);
   return __ret;
 }
 #endif
@@ -41263,14 +41263,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint16_t vaddlv_u8(uint8x8_t __p0) {
   uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlv_u8((int8x8_t)__p0);
+  __ret = (uint16_t) __builtin_neon_vaddlv_u8(__p0);
   return __ret;
 }
 #else
 __ai uint16_t vaddlv_u8(uint8x8_t __p0) {
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlv_u8((int8x8_t)__rev0);
+  __ret = (uint16_t) __builtin_neon_vaddlv_u8(__rev0);
   return __ret;
 }
 #endif
@@ -41278,14 +41278,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint64_t vaddlv_u32(uint32x2_t __p0) {
   uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlv_u32((int8x8_t)__p0);
+  __ret = (uint64_t) __builtin_neon_vaddlv_u32(__p0);
   return __ret;
 }
 #else
 __ai uint64_t vaddlv_u32(uint32x2_t __p0) {
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlv_u32((int8x8_t)__rev0);
+  __ret = (uint64_t) __builtin_neon_vaddlv_u32(__rev0);
   return __ret;
 }
 #endif
@@ -41293,14 +41293,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint32_t vaddlv_u16(uint16x4_t __p0) {
   uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlv_u16((int8x8_t)__p0);
+  __ret = (uint32_t) __builtin_neon_vaddlv_u16(__p0);
   return __ret;
 }
 #else
 __ai uint32_t vaddlv_u16(uint16x4_t __p0) {
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlv_u16((int8x8_t)__rev0);
+  __ret = (uint32_t) __builtin_neon_vaddlv_u16(__rev0);
   return __ret;
 }
 #endif
@@ -41308,14 +41308,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int16_t vaddlv_s8(int8x8_t __p0) {
   int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlv_s8((int8x8_t)__p0);
+  __ret = (int16_t) __builtin_neon_vaddlv_s8(__p0);
   return __ret;
 }
 #else
 __ai int16_t vaddlv_s8(int8x8_t __p0) {
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlv_s8((int8x8_t)__rev0);
+  __ret = (int16_t) __builtin_neon_vaddlv_s8(__rev0);
   return __ret;
 }
 #endif
@@ -41323,14 +41323,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int64_t vaddlv_s32(int32x2_t __p0) {
   int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlv_s32((int8x8_t)__p0);
+  __ret = (int64_t) __builtin_neon_vaddlv_s32(__p0);
   return __ret;
 }
 #else
 __ai int64_t vaddlv_s32(int32x2_t __p0) {
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlv_s32((int8x8_t)__rev0);
+  __ret = (int64_t) __builtin_neon_vaddlv_s32(__rev0);
   return __ret;
 }
 #endif
@@ -41338,14 +41338,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32_t vaddlv_s16(int16x4_t __p0) {
   int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlv_s16((int8x8_t)__p0);
+  __ret = (int32_t) __builtin_neon_vaddlv_s16(__p0);
   return __ret;
 }
 #else
 __ai int32_t vaddlv_s16(int16x4_t __p0) {
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlv_s16((int8x8_t)__rev0);
+  __ret = (int32_t) __builtin_neon_vaddlv_s16(__rev0);
   return __ret;
 }
 #endif
@@ -41353,14 +41353,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint8_t vaddvq_u8(uint8x16_t __p0) {
   uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddvq_u8((int8x16_t)__p0);
+  __ret = (uint8_t) __builtin_neon_vaddvq_u8(__p0);
   return __ret;
 }
 #else
 __ai uint8_t vaddvq_u8(uint8x16_t __p0) {
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddvq_u8((int8x16_t)__rev0);
+  __ret = (uint8_t) __builtin_neon_vaddvq_u8(__rev0);
   return __ret;
 }
 #endif
@@ -41368,14 +41368,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint32_t vaddvq_u32(uint32x4_t __p0) {
   uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddvq_u32((int8x16_t)__p0);
+  __ret = (uint32_t) __builtin_neon_vaddvq_u32(__p0);
   return __ret;
 }
 #else
 __ai uint32_t vaddvq_u32(uint32x4_t __p0) {
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddvq_u32((int8x16_t)__rev0);
+  __ret = (uint32_t) __builtin_neon_vaddvq_u32(__rev0);
   return __ret;
 }
 #endif
@@ -41383,14 +41383,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint64_t vaddvq_u64(uint64x2_t __p0) {
   uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddvq_u64((int8x16_t)__p0);
+  __ret = (uint64_t) __builtin_neon_vaddvq_u64(__p0);
   return __ret;
 }
 #else
 __ai uint64_t vaddvq_u64(uint64x2_t __p0) {
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddvq_u64((int8x16_t)__rev0);
+  __ret = (uint64_t) __builtin_neon_vaddvq_u64(__rev0);
   return __ret;
 }
 #endif
@@ -41398,14 +41398,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint16_t vaddvq_u16(uint16x8_t __p0) {
   uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddvq_u16((int8x16_t)__p0);
+  __ret = (uint16_t) __builtin_neon_vaddvq_u16(__p0);
   return __ret;
 }
 #else
 __ai uint16_t vaddvq_u16(uint16x8_t __p0) {
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddvq_u16((int8x16_t)__rev0);
+  __ret = (uint16_t) __builtin_neon_vaddvq_u16(__rev0);
   return __ret;
 }
 #endif
@@ -41413,14 +41413,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int8_t vaddvq_s8(int8x16_t __p0) {
   int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddvq_s8((int8x16_t)__p0);
+  __ret = (int8_t) __builtin_neon_vaddvq_s8(__p0);
   return __ret;
 }
 #else
 __ai int8_t vaddvq_s8(int8x16_t __p0) {
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddvq_s8((int8x16_t)__rev0);
+  __ret = (int8_t) __builtin_neon_vaddvq_s8(__rev0);
   return __ret;
 }
 #endif
@@ -41428,14 +41428,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float64_t vaddvq_f64(float64x2_t __p0) {
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vaddvq_f64((int8x16_t)__p0);
+  __ret = (float64_t) __builtin_neon_vaddvq_f64(__p0);
   return __ret;
 }
 #else
 __ai float64_t vaddvq_f64(float64x2_t __p0) {
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vaddvq_f64((int8x16_t)__rev0);
+  __ret = (float64_t) __builtin_neon_vaddvq_f64(__rev0);
   return __ret;
 }
 #endif
@@ -41443,14 +41443,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float32_t vaddvq_f32(float32x4_t __p0) {
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddvq_f32((int8x16_t)__p0);
+  __ret = (float32_t) __builtin_neon_vaddvq_f32(__p0);
   return __ret;
 }
 #else
 __ai float32_t vaddvq_f32(float32x4_t __p0) {
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddvq_f32((int8x16_t)__rev0);
+  __ret = (float32_t) __builtin_neon_vaddvq_f32(__rev0);
   return __ret;
 }
 #endif
@@ -41458,14 +41458,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32_t vaddvq_s32(int32x4_t __p0) {
   int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddvq_s32((int8x16_t)__p0);
+  __ret = (int32_t) __builtin_neon_vaddvq_s32(__p0);
   return __ret;
 }
 #else
 __ai int32_t vaddvq_s32(int32x4_t __p0) {
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddvq_s32((int8x16_t)__rev0);
+  __ret = (int32_t) __builtin_neon_vaddvq_s32(__rev0);
   return __ret;
 }
 #endif
@@ -41473,14 +41473,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int64_t vaddvq_s64(int64x2_t __p0) {
   int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddvq_s64((int8x16_t)__p0);
+  __ret = (int64_t) __builtin_neon_vaddvq_s64(__p0);
   return __ret;
 }
 #else
 __ai int64_t vaddvq_s64(int64x2_t __p0) {
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddvq_s64((int8x16_t)__rev0);
+  __ret = (int64_t) __builtin_neon_vaddvq_s64(__rev0);
   return __ret;
 }
 #endif
@@ -41488,14 +41488,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int16_t vaddvq_s16(int16x8_t __p0) {
   int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddvq_s16((int8x16_t)__p0);
+  __ret = (int16_t) __builtin_neon_vaddvq_s16(__p0);
   return __ret;
 }
 #else
 __ai int16_t vaddvq_s16(int16x8_t __p0) {
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddvq_s16((int8x16_t)__rev0);
+  __ret = (int16_t) __builtin_neon_vaddvq_s16(__rev0);
   return __ret;
 }
 #endif
@@ -41503,14 +41503,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint8_t vaddv_u8(uint8x8_t __p0) {
   uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddv_u8((int8x8_t)__p0);
+  __ret = (uint8_t) __builtin_neon_vaddv_u8(__p0);
   return __ret;
 }
 #else
 __ai uint8_t vaddv_u8(uint8x8_t __p0) {
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddv_u8((int8x8_t)__rev0);
+  __ret = (uint8_t) __builtin_neon_vaddv_u8(__rev0);
   return __ret;
 }
 #endif
@@ -41518,14 +41518,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint32_t vaddv_u32(uint32x2_t __p0) {
   uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddv_u32((int8x8_t)__p0);
+  __ret = (uint32_t) __builtin_neon_vaddv_u32(__p0);
   return __ret;
 }
 #else
 __ai uint32_t vaddv_u32(uint32x2_t __p0) {
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddv_u32((int8x8_t)__rev0);
+  __ret = (uint32_t) __builtin_neon_vaddv_u32(__rev0);
   return __ret;
 }
 #endif
@@ -41533,14 +41533,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint16_t vaddv_u16(uint16x4_t __p0) {
   uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddv_u16((int8x8_t)__p0);
+  __ret = (uint16_t) __builtin_neon_vaddv_u16(__p0);
   return __ret;
 }
 #else
 __ai uint16_t vaddv_u16(uint16x4_t __p0) {
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddv_u16((int8x8_t)__rev0);
+  __ret = (uint16_t) __builtin_neon_vaddv_u16(__rev0);
   return __ret;
 }
 #endif
@@ -41548,14 +41548,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int8_t vaddv_s8(int8x8_t __p0) {
   int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddv_s8((int8x8_t)__p0);
+  __ret = (int8_t) __builtin_neon_vaddv_s8(__p0);
   return __ret;
 }
 #else
 __ai int8_t vaddv_s8(int8x8_t __p0) {
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddv_s8((int8x8_t)__rev0);
+  __ret = (int8_t) __builtin_neon_vaddv_s8(__rev0);
   return __ret;
 }
 #endif
@@ -41563,14 +41563,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float32_t vaddv_f32(float32x2_t __p0) {
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddv_f32((int8x8_t)__p0);
+  __ret = (float32_t) __builtin_neon_vaddv_f32(__p0);
   return __ret;
 }
 #else
 __ai float32_t vaddv_f32(float32x2_t __p0) {
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddv_f32((int8x8_t)__rev0);
+  __ret = (float32_t) __builtin_neon_vaddv_f32(__rev0);
   return __ret;
 }
 #endif
@@ -41578,14 +41578,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32_t vaddv_s32(int32x2_t __p0) {
   int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddv_s32((int8x8_t)__p0);
+  __ret = (int32_t) __builtin_neon_vaddv_s32(__p0);
   return __ret;
 }
 #else
 __ai int32_t vaddv_s32(int32x2_t __p0) {
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddv_s32((int8x8_t)__rev0);
+  __ret = (int32_t) __builtin_neon_vaddv_s32(__rev0);
   return __ret;
 }
 #endif
@@ -41593,14 +41593,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int16_t vaddv_s16(int16x4_t __p0) {
   int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddv_s16((int8x8_t)__p0);
+  __ret = (int16_t) __builtin_neon_vaddv_s16(__p0);
   return __ret;
 }
 #else
 __ai int16_t vaddv_s16(int16x4_t __p0) {
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddv_s16((int8x8_t)__rev0);
+  __ret = (int16_t) __builtin_neon_vaddv_s16(__rev0);
   return __ret;
 }
 #endif
@@ -44852,7 +44852,7 @@
 #define vdupb_lane_p8(__p0, __p1) __extension__ ({ \
   poly8x8_t __s0 = __p0; \
   poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
+  __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((poly8x8_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -44860,7 +44860,7 @@
   poly8x8_t __s0 = __p0; \
   poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
+  __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((poly8x8_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -44869,7 +44869,7 @@
 #define vduph_lane_p16(__p0, __p1) __extension__ ({ \
   poly16x4_t __s0 = __p0; \
   poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__s0, __p1); \
+  __ret = (poly16_t) __builtin_neon_vduph_lane_i16((poly16x4_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -44877,7 +44877,7 @@
   poly16x4_t __s0 = __p0; \
   poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__rev0, __p1); \
+  __ret = (poly16_t) __builtin_neon_vduph_lane_i16((poly16x4_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -44903,7 +44903,7 @@
 #define vdups_lane_u32(__p0, __p1) __extension__ ({ \
   uint32x2_t __s0 = __p0; \
   uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__s0, __p1); \
+  __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -44911,7 +44911,7 @@
   uint32x2_t __s0 = __p0; \
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__rev0, __p1); \
+  __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -44919,14 +44919,14 @@
 #define vdupd_lane_u64(__p0, __p1) __extension__ ({ \
   uint64x1_t __s0 = __p0; \
   uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vdupd_lane_i64((int8x8_t)__s0, __p1); \
+  __ret = (uint64_t) __builtin_neon_vdupd_lane_i64((int64x1_t)__s0, __p1); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vduph_lane_u16(__p0, __p1) __extension__ ({ \
   uint16x4_t __s0 = __p0; \
   uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__s0, __p1); \
+  __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -44934,7 +44934,7 @@
   uint16x4_t __s0 = __p0; \
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__rev0, __p1); \
+  __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -44959,14 +44959,14 @@
 #define vdupd_lane_f64(__p0, __p1) __extension__ ({ \
   float64x1_t __s0 = __p0; \
   float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vdupd_lane_f64((int8x8_t)__s0, __p1); \
+  __ret = (float64_t) __builtin_neon_vdupd_lane_f64((float64x1_t)__s0, __p1); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vdups_lane_f32(__p0, __p1) __extension__ ({ \
   float32x2_t __s0 = __p0; \
   float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_lane_f32((int8x8_t)__s0, __p1); \
+  __ret = (float32_t) __builtin_neon_vdups_lane_f32((float32x2_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -44974,7 +44974,7 @@
   float32x2_t __s0 = __p0; \
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_lane_f32((int8x8_t)__rev0, __p1); \
+  __ret = (float32_t) __builtin_neon_vdups_lane_f32((float32x2_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -44983,7 +44983,7 @@
 #define vdups_lane_s32(__p0, __p1) __extension__ ({ \
   int32x2_t __s0 = __p0; \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__s0, __p1); \
+  __ret = (int32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -44991,7 +44991,7 @@
   int32x2_t __s0 = __p0; \
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__rev0, __p1); \
+  __ret = (int32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -44999,14 +44999,14 @@
 #define vdupd_lane_s64(__p0, __p1) __extension__ ({ \
   int64x1_t __s0 = __p0; \
   int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vdupd_lane_i64((int8x8_t)__s0, __p1); \
+  __ret = (int64_t) __builtin_neon_vdupd_lane_i64((int64x1_t)__s0, __p1); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vduph_lane_s16(__p0, __p1) __extension__ ({ \
   int16x4_t __s0 = __p0; \
   int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__s0, __p1); \
+  __ret = (int16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -45014,7 +45014,7 @@
   int16x4_t __s0 = __p0; \
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__rev0, __p1); \
+  __ret = (int16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -45105,7 +45105,7 @@
 #define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \
   poly8x16_t __s0 = __p0; \
   poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
+  __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((poly8x16_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -45113,7 +45113,7 @@
   poly8x16_t __s0 = __p0; \
   poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
+  __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((poly8x16_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -45122,7 +45122,7 @@
 #define vduph_laneq_p16(__p0, __p1) __extension__ ({ \
   poly16x8_t __s0 = __p0; \
   poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__s0, __p1); \
+  __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((poly16x8_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -45130,7 +45130,7 @@
   poly16x8_t __s0 = __p0; \
   poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__rev0, __p1); \
+  __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((poly16x8_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -45156,7 +45156,7 @@
 #define vdups_laneq_u32(__p0, __p1) __extension__ ({ \
   uint32x4_t __s0 = __p0; \
   uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__s0, __p1); \
+  __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -45164,7 +45164,7 @@
   uint32x4_t __s0 = __p0; \
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__rev0, __p1); \
+  __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -45173,7 +45173,7 @@
 #define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \
   uint64x2_t __s0 = __p0; \
   uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__s0, __p1); \
+  __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -45181,7 +45181,7 @@
   uint64x2_t __s0 = __p0; \
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__rev0, __p1); \
+  __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -45190,7 +45190,7 @@
 #define vduph_laneq_u16(__p0, __p1) __extension__ ({ \
   uint16x8_t __s0 = __p0; \
   uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__s0, __p1); \
+  __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -45198,7 +45198,7 @@
   uint16x8_t __s0 = __p0; \
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__rev0, __p1); \
+  __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -45224,7 +45224,7 @@
 #define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \
   float64x2_t __s0 = __p0; \
   float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((int8x16_t)__s0, __p1); \
+  __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((float64x2_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -45232,7 +45232,7 @@
   float64x2_t __s0 = __p0; \
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((int8x16_t)__rev0, __p1); \
+  __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((float64x2_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -45241,7 +45241,7 @@
 #define vdups_laneq_f32(__p0, __p1) __extension__ ({ \
   float32x4_t __s0 = __p0; \
   float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_laneq_f32((int8x16_t)__s0, __p1); \
+  __ret = (float32_t) __builtin_neon_vdups_laneq_f32((float32x4_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -45249,7 +45249,7 @@
   float32x4_t __s0 = __p0; \
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_laneq_f32((int8x16_t)__rev0, __p1); \
+  __ret = (float32_t) __builtin_neon_vdups_laneq_f32((float32x4_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -45258,7 +45258,7 @@
 #define vdups_laneq_s32(__p0, __p1) __extension__ ({ \
   int32x4_t __s0 = __p0; \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__s0, __p1); \
+  __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -45266,7 +45266,7 @@
   int32x4_t __s0 = __p0; \
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__rev0, __p1); \
+  __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -45275,7 +45275,7 @@
 #define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \
   int64x2_t __s0 = __p0; \
   int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__s0, __p1); \
+  __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -45283,7 +45283,7 @@
   int64x2_t __s0 = __p0; \
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__rev0, __p1); \
+  __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -45292,7 +45292,7 @@
 #define vduph_laneq_s16(__p0, __p1) __extension__ ({ \
   int16x8_t __s0 = __p0; \
   int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__s0, __p1); \
+  __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -45300,7 +45300,7 @@
   int16x8_t __s0 = __p0; \
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
   int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__rev0, __p1); \
+  __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__rev0, __p1); \
   __ret; \
 })
 #endif
@@ -45934,7 +45934,7 @@
   float64_t __s1 = __p1; \
   float64x1_t __s2 = __p2; \
   float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (int8x8_t)__s2, __p3); \
+  __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (float64x1_t)__s2, __p3); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
@@ -45943,7 +45943,7 @@
   float32_t __s1 = __p1; \
   float32x2_t __s2 = __p2; \
   float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (int8x8_t)__s2, __p3); \
+  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__s2, __p3); \
   __ret; \
 })
 #else
@@ -45953,7 +45953,7 @@
   float32x2_t __s2 = __p2; \
   float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
   float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (int8x8_t)__rev2, __p3); \
+  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__rev2, __p3); \
   __ret; \
 })
 #define __noswap_vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
@@ -45961,7 +45961,7 @@
   float32_t __s1 = __p1; \
   float32x2_t __s2 = __p2; \
   float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (int8x8_t)__s2, __p3); \
+  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__s2, __p3); \
   __ret; \
 })
 #endif
@@ -46075,7 +46075,7 @@
   float64_t __s1 = __p1; \
   float64x2_t __s2 = __p2; \
   float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (int8x16_t)__s2, __p3); \
+  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__s2, __p3); \
   __ret; \
 })
 #else
@@ -46085,7 +46085,7 @@
   float64x2_t __s2 = __p2; \
   float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
   float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (int8x16_t)__rev2, __p3); \
+  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__rev2, __p3); \
   __ret; \
 })
 #define __noswap_vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
@@ -46093,7 +46093,7 @@
   float64_t __s1 = __p1; \
   float64x2_t __s2 = __p2; \
   float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (int8x16_t)__s2, __p3); \
+  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__s2, __p3); \
   __ret; \
 })
 #endif
@@ -46104,7 +46104,7 @@
   float32_t __s1 = __p1; \
   float32x4_t __s2 = __p2; \
   float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (int8x16_t)__s2, __p3); \
+  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__s2, __p3); \
   __ret; \
 })
 #else
@@ -46114,7 +46114,7 @@
   float32x4_t __s2 = __p2; \
   float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
   float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (int8x16_t)__rev2, __p3); \
+  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__rev2, __p3); \
   __ret; \
 })
 #define __noswap_vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
@@ -46122,7 +46122,7 @@
   float32_t __s1 = __p1; \
   float32x4_t __s2 = __p2; \
   float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (int8x16_t)__s2, __p3); \
+  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__s2, __p3); \
   __ret; \
 })
 #endif
@@ -46634,14 +46634,14 @@
 #define vget_lane_p64(__p0, __p1) __extension__ ({ \
   poly64x1_t __s0 = __p0; \
   poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
+  __ret = (poly64_t) __builtin_neon_vget_lane_i64((poly64x1_t)__s0, __p1); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
 #define vgetq_lane_p64(__p0, __p1) __extension__ ({ \
   poly64x2_t __s0 = __p0; \
   poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
+  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -46649,13 +46649,13 @@
   poly64x2_t __s0 = __p0; \
   poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__rev0, __p1); \
+  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_p64(__p0, __p1) __extension__ ({ \
   poly64x2_t __s0 = __p0; \
   poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
+  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -46664,7 +46664,7 @@
 #define vgetq_lane_f64(__p0, __p1) __extension__ ({ \
   float64x2_t __s0 = __p0; \
   float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((int8x16_t)__s0, __p1); \
+  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__s0, __p1); \
   __ret; \
 })
 #else
@@ -46672,13 +46672,13 @@
   float64x2_t __s0 = __p0; \
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
   float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((int8x16_t)__rev0, __p1); \
+  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__rev0, __p1); \
   __ret; \
 })
 #define __noswap_vgetq_lane_f64(__p0, __p1) __extension__ ({ \
   float64x2_t __s0 = __p0; \
   float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((int8x16_t)__s0, __p1); \
+  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__s0, __p1); \
   __ret; \
 })
 #endif
@@ -46686,7 +46686,7 @@
 #define vget_lane_f64(__p0, __p1) __extension__ ({ \
   float64x1_t __s0 = __p0; \
   float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vget_lane_f64((int8x8_t)__s0, __p1); \
+  __ret = (float64_t) __builtin_neon_vget_lane_f64((float64x1_t)__s0, __p1); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
@@ -47227,7 +47227,7 @@
 #define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
   float64x2x2_t __s1 = __p1; \
   float64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 42); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 42); \
   __ret; \
 })
 #else
@@ -47237,7 +47237,7 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   float64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 42); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 42); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
@@ -47249,7 +47249,7 @@
 #define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
   int64x2x2_t __s1 = __p1; \
   int64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 35); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 35); \
   __ret; \
 })
 #else
@@ -47259,7 +47259,7 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   int64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 35); \
+  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 35); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
@@ -47276,13 +47276,13 @@
 #define vld2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
   float64x1x2_t __s1 = __p1; \
   float64x1x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 10); \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 10); \
   __ret; \
 })
 #define vld2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
   int64x1x2_t __s1 = __p1; \
   int64x1x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 3); \
+  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 3); \
   __ret; \
 })
 #define vld3_p64(__p0) __extension__ ({ \
@@ -47543,7 +47543,7 @@
 #define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
   float64x2x3_t __s1 = __p1; \
   float64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 42); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 42); \
   __ret; \
 })
 #else
@@ -47554,7 +47554,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   float64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 42); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 42); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
@@ -47567,7 +47567,7 @@
 #define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
   int64x2x3_t __s1 = __p1; \
   int64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 35); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 35); \
   __ret; \
 })
 #else
@@ -47578,7 +47578,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   int64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 35); \
+  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 35); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
@@ -47596,13 +47596,13 @@
 #define vld3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
   float64x1x3_t __s1 = __p1; \
   float64x1x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 10); \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 10); \
   __ret; \
 })
 #define vld3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
   int64x1x3_t __s1 = __p1; \
   int64x1x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 3); \
+  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 3); \
   __ret; \
 })
 #define vld4_p64(__p0) __extension__ ({ \
@@ -47879,7 +47879,7 @@
 #define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
   float64x2x4_t __s1 = __p1; \
   float64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 42); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 42); \
   __ret; \
 })
 #else
@@ -47891,7 +47891,7 @@
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
   float64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 42); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 42); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
@@ -47905,7 +47905,7 @@
 #define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
   int64x2x4_t __s1 = __p1; \
   int64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 35); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 35); \
   __ret; \
 })
 #else
@@ -47917,7 +47917,7 @@
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
   int64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 35); \
+  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 35); \
  \
   __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
   __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
@@ -47936,13 +47936,13 @@
 #define vld4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
   float64x1x4_t __s1 = __p1; \
   float64x1x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 10); \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 10); \
   __ret; \
 })
 #define vld4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
   int64x1x4_t __s1 = __p1; \
   int64x1x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 3); \
+  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 3); \
   __ret; \
 })
 #define vldrq_p128(__p0) __extension__ ({ \
@@ -47975,14 +47975,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float64_t vmaxnmvq_f64(float64x2_t __p0) {
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxnmvq_f64((int8x16_t)__p0);
+  __ret = (float64_t) __builtin_neon_vmaxnmvq_f64(__p0);
   return __ret;
 }
 #else
 __ai float64_t vmaxnmvq_f64(float64x2_t __p0) {
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxnmvq_f64((int8x16_t)__rev0);
+  __ret = (float64_t) __builtin_neon_vmaxnmvq_f64(__rev0);
   return __ret;
 }
 #endif
@@ -47990,14 +47990,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float32_t vmaxnmvq_f32(float32x4_t __p0) {
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmvq_f32((int8x16_t)__p0);
+  __ret = (float32_t) __builtin_neon_vmaxnmvq_f32(__p0);
   return __ret;
 }
 #else
 __ai float32_t vmaxnmvq_f32(float32x4_t __p0) {
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmvq_f32((int8x16_t)__rev0);
+  __ret = (float32_t) __builtin_neon_vmaxnmvq_f32(__rev0);
   return __ret;
 }
 #endif
@@ -48005,14 +48005,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float32_t vmaxnmv_f32(float32x2_t __p0) {
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmv_f32((int8x8_t)__p0);
+  __ret = (float32_t) __builtin_neon_vmaxnmv_f32(__p0);
   return __ret;
 }
 #else
 __ai float32_t vmaxnmv_f32(float32x2_t __p0) {
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmv_f32((int8x8_t)__rev0);
+  __ret = (float32_t) __builtin_neon_vmaxnmv_f32(__rev0);
   return __ret;
 }
 #endif
@@ -48020,14 +48020,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint8_t vmaxvq_u8(uint8x16_t __p0) {
   uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxvq_u8((int8x16_t)__p0);
+  __ret = (uint8_t) __builtin_neon_vmaxvq_u8(__p0);
   return __ret;
 }
 #else
 __ai uint8_t vmaxvq_u8(uint8x16_t __p0) {
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxvq_u8((int8x16_t)__rev0);
+  __ret = (uint8_t) __builtin_neon_vmaxvq_u8(__rev0);
   return __ret;
 }
 #endif
@@ -48035,14 +48035,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint32_t vmaxvq_u32(uint32x4_t __p0) {
   uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxvq_u32((int8x16_t)__p0);
+  __ret = (uint32_t) __builtin_neon_vmaxvq_u32(__p0);
   return __ret;
 }
 #else
 __ai uint32_t vmaxvq_u32(uint32x4_t __p0) {
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxvq_u32((int8x16_t)__rev0);
+  __ret = (uint32_t) __builtin_neon_vmaxvq_u32(__rev0);
   return __ret;
 }
 #endif
@@ -48050,14 +48050,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint16_t vmaxvq_u16(uint16x8_t __p0) {
   uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxvq_u16((int8x16_t)__p0);
+  __ret = (uint16_t) __builtin_neon_vmaxvq_u16(__p0);
   return __ret;
 }
 #else
 __ai uint16_t vmaxvq_u16(uint16x8_t __p0) {
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxvq_u16((int8x16_t)__rev0);
+  __ret = (uint16_t) __builtin_neon_vmaxvq_u16(__rev0);
   return __ret;
 }
 #endif
@@ -48065,14 +48065,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int8_t vmaxvq_s8(int8x16_t __p0) {
   int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxvq_s8((int8x16_t)__p0);
+  __ret = (int8_t) __builtin_neon_vmaxvq_s8(__p0);
   return __ret;
 }
 #else
 __ai int8_t vmaxvq_s8(int8x16_t __p0) {
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxvq_s8((int8x16_t)__rev0);
+  __ret = (int8_t) __builtin_neon_vmaxvq_s8(__rev0);
   return __ret;
 }
 #endif
@@ -48080,14 +48080,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float64_t vmaxvq_f64(float64x2_t __p0) {
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxvq_f64((int8x16_t)__p0);
+  __ret = (float64_t) __builtin_neon_vmaxvq_f64(__p0);
   return __ret;
 }
 #else
 __ai float64_t vmaxvq_f64(float64x2_t __p0) {
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxvq_f64((int8x16_t)__rev0);
+  __ret = (float64_t) __builtin_neon_vmaxvq_f64(__rev0);
   return __ret;
 }
 #endif
@@ -48095,14 +48095,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float32_t vmaxvq_f32(float32x4_t __p0) {
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxvq_f32((int8x16_t)__p0);
+  __ret = (float32_t) __builtin_neon_vmaxvq_f32(__p0);
   return __ret;
 }
 #else
 __ai float32_t vmaxvq_f32(float32x4_t __p0) {
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxvq_f32((int8x16_t)__rev0);
+  __ret = (float32_t) __builtin_neon_vmaxvq_f32(__rev0);
   return __ret;
 }
 #endif
@@ -48110,14 +48110,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32_t vmaxvq_s32(int32x4_t __p0) {
   int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxvq_s32((int8x16_t)__p0);
+  __ret = (int32_t) __builtin_neon_vmaxvq_s32(__p0);
   return __ret;
 }
 #else
 __ai int32_t vmaxvq_s32(int32x4_t __p0) {
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxvq_s32((int8x16_t)__rev0);
+  __ret = (int32_t) __builtin_neon_vmaxvq_s32(__rev0);
   return __ret;
 }
 #endif
@@ -48125,14 +48125,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int16_t vmaxvq_s16(int16x8_t __p0) {
   int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxvq_s16((int8x16_t)__p0);
+  __ret = (int16_t) __builtin_neon_vmaxvq_s16(__p0);
   return __ret;
 }
 #else
 __ai int16_t vmaxvq_s16(int16x8_t __p0) {
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxvq_s16((int8x16_t)__rev0);
+  __ret = (int16_t) __builtin_neon_vmaxvq_s16(__rev0);
   return __ret;
 }
 #endif
@@ -48140,14 +48140,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint8_t vmaxv_u8(uint8x8_t __p0) {
   uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxv_u8((int8x8_t)__p0);
+  __ret = (uint8_t) __builtin_neon_vmaxv_u8(__p0);
   return __ret;
 }
 #else
 __ai uint8_t vmaxv_u8(uint8x8_t __p0) {
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxv_u8((int8x8_t)__rev0);
+  __ret = (uint8_t) __builtin_neon_vmaxv_u8(__rev0);
   return __ret;
 }
 #endif
@@ -48155,14 +48155,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint32_t vmaxv_u32(uint32x2_t __p0) {
   uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxv_u32((int8x8_t)__p0);
+  __ret = (uint32_t) __builtin_neon_vmaxv_u32(__p0);
   return __ret;
 }
 #else
 __ai uint32_t vmaxv_u32(uint32x2_t __p0) {
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxv_u32((int8x8_t)__rev0);
+  __ret = (uint32_t) __builtin_neon_vmaxv_u32(__rev0);
   return __ret;
 }
 #endif
@@ -48170,14 +48170,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint16_t vmaxv_u16(uint16x4_t __p0) {
   uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxv_u16((int8x8_t)__p0);
+  __ret = (uint16_t) __builtin_neon_vmaxv_u16(__p0);
   return __ret;
 }
 #else
 __ai uint16_t vmaxv_u16(uint16x4_t __p0) {
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxv_u16((int8x8_t)__rev0);
+  __ret = (uint16_t) __builtin_neon_vmaxv_u16(__rev0);
   return __ret;
 }
 #endif
@@ -48185,14 +48185,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int8_t vmaxv_s8(int8x8_t __p0) {
   int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxv_s8((int8x8_t)__p0);
+  __ret = (int8_t) __builtin_neon_vmaxv_s8(__p0);
   return __ret;
 }
 #else
 __ai int8_t vmaxv_s8(int8x8_t __p0) {
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxv_s8((int8x8_t)__rev0);
+  __ret = (int8_t) __builtin_neon_vmaxv_s8(__rev0);
   return __ret;
 }
 #endif
@@ -48200,14 +48200,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float32_t vmaxv_f32(float32x2_t __p0) {
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxv_f32((int8x8_t)__p0);
+  __ret = (float32_t) __builtin_neon_vmaxv_f32(__p0);
   return __ret;
 }
 #else
 __ai float32_t vmaxv_f32(float32x2_t __p0) {
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxv_f32((int8x8_t)__rev0);
+  __ret = (float32_t) __builtin_neon_vmaxv_f32(__rev0);
   return __ret;
 }
 #endif
@@ -48215,14 +48215,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32_t vmaxv_s32(int32x2_t __p0) {
   int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxv_s32((int8x8_t)__p0);
+  __ret = (int32_t) __builtin_neon_vmaxv_s32(__p0);
   return __ret;
 }
 #else
 __ai int32_t vmaxv_s32(int32x2_t __p0) {
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxv_s32((int8x8_t)__rev0);
+  __ret = (int32_t) __builtin_neon_vmaxv_s32(__rev0);
   return __ret;
 }
 #endif
@@ -48230,14 +48230,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int16_t vmaxv_s16(int16x4_t __p0) {
   int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxv_s16((int8x8_t)__p0);
+  __ret = (int16_t) __builtin_neon_vmaxv_s16(__p0);
   return __ret;
 }
 #else
 __ai int16_t vmaxv_s16(int16x4_t __p0) {
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxv_s16((int8x8_t)__rev0);
+  __ret = (int16_t) __builtin_neon_vmaxv_s16(__rev0);
   return __ret;
 }
 #endif
@@ -48267,14 +48267,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float64_t vminnmvq_f64(float64x2_t __p0) {
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminnmvq_f64((int8x16_t)__p0);
+  __ret = (float64_t) __builtin_neon_vminnmvq_f64(__p0);
   return __ret;
 }
 #else
 __ai float64_t vminnmvq_f64(float64x2_t __p0) {
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminnmvq_f64((int8x16_t)__rev0);
+  __ret = (float64_t) __builtin_neon_vminnmvq_f64(__rev0);
   return __ret;
 }
 #endif
@@ -48282,14 +48282,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float32_t vminnmvq_f32(float32x4_t __p0) {
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmvq_f32((int8x16_t)__p0);
+  __ret = (float32_t) __builtin_neon_vminnmvq_f32(__p0);
   return __ret;
 }
 #else
 __ai float32_t vminnmvq_f32(float32x4_t __p0) {
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmvq_f32((int8x16_t)__rev0);
+  __ret = (float32_t) __builtin_neon_vminnmvq_f32(__rev0);
   return __ret;
 }
 #endif
@@ -48297,14 +48297,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float32_t vminnmv_f32(float32x2_t __p0) {
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmv_f32((int8x8_t)__p0);
+  __ret = (float32_t) __builtin_neon_vminnmv_f32(__p0);
   return __ret;
 }
 #else
 __ai float32_t vminnmv_f32(float32x2_t __p0) {
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmv_f32((int8x8_t)__rev0);
+  __ret = (float32_t) __builtin_neon_vminnmv_f32(__rev0);
   return __ret;
 }
 #endif
@@ -48312,14 +48312,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint8_t vminvq_u8(uint8x16_t __p0) {
   uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminvq_u8((int8x16_t)__p0);
+  __ret = (uint8_t) __builtin_neon_vminvq_u8(__p0);
   return __ret;
 }
 #else
 __ai uint8_t vminvq_u8(uint8x16_t __p0) {
   uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminvq_u8((int8x16_t)__rev0);
+  __ret = (uint8_t) __builtin_neon_vminvq_u8(__rev0);
   return __ret;
 }
 #endif
@@ -48327,14 +48327,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint32_t vminvq_u32(uint32x4_t __p0) {
   uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminvq_u32((int8x16_t)__p0);
+  __ret = (uint32_t) __builtin_neon_vminvq_u32(__p0);
   return __ret;
 }
 #else
 __ai uint32_t vminvq_u32(uint32x4_t __p0) {
   uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminvq_u32((int8x16_t)__rev0);
+  __ret = (uint32_t) __builtin_neon_vminvq_u32(__rev0);
   return __ret;
 }
 #endif
@@ -48342,14 +48342,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint16_t vminvq_u16(uint16x8_t __p0) {
   uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminvq_u16((int8x16_t)__p0);
+  __ret = (uint16_t) __builtin_neon_vminvq_u16(__p0);
   return __ret;
 }
 #else
 __ai uint16_t vminvq_u16(uint16x8_t __p0) {
   uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminvq_u16((int8x16_t)__rev0);
+  __ret = (uint16_t) __builtin_neon_vminvq_u16(__rev0);
   return __ret;
 }
 #endif
@@ -48357,14 +48357,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int8_t vminvq_s8(int8x16_t __p0) {
   int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminvq_s8((int8x16_t)__p0);
+  __ret = (int8_t) __builtin_neon_vminvq_s8(__p0);
   return __ret;
 }
 #else
 __ai int8_t vminvq_s8(int8x16_t __p0) {
   int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
   int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminvq_s8((int8x16_t)__rev0);
+  __ret = (int8_t) __builtin_neon_vminvq_s8(__rev0);
   return __ret;
 }
 #endif
@@ -48372,14 +48372,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float64_t vminvq_f64(float64x2_t __p0) {
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminvq_f64((int8x16_t)__p0);
+  __ret = (float64_t) __builtin_neon_vminvq_f64(__p0);
   return __ret;
 }
 #else
 __ai float64_t vminvq_f64(float64x2_t __p0) {
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminvq_f64((int8x16_t)__rev0);
+  __ret = (float64_t) __builtin_neon_vminvq_f64(__rev0);
   return __ret;
 }
 #endif
@@ -48387,14 +48387,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float32_t vminvq_f32(float32x4_t __p0) {
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminvq_f32((int8x16_t)__p0);
+  __ret = (float32_t) __builtin_neon_vminvq_f32(__p0);
   return __ret;
 }
 #else
 __ai float32_t vminvq_f32(float32x4_t __p0) {
   float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminvq_f32((int8x16_t)__rev0);
+  __ret = (float32_t) __builtin_neon_vminvq_f32(__rev0);
   return __ret;
 }
 #endif
@@ -48402,14 +48402,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32_t vminvq_s32(int32x4_t __p0) {
   int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminvq_s32((int8x16_t)__p0);
+  __ret = (int32_t) __builtin_neon_vminvq_s32(__p0);
   return __ret;
 }
 #else
 __ai int32_t vminvq_s32(int32x4_t __p0) {
   int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminvq_s32((int8x16_t)__rev0);
+  __ret = (int32_t) __builtin_neon_vminvq_s32(__rev0);
   return __ret;
 }
 #endif
@@ -48417,14 +48417,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int16_t vminvq_s16(int16x8_t __p0) {
   int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminvq_s16((int8x16_t)__p0);
+  __ret = (int16_t) __builtin_neon_vminvq_s16(__p0);
   return __ret;
 }
 #else
 __ai int16_t vminvq_s16(int16x8_t __p0) {
   int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminvq_s16((int8x16_t)__rev0);
+  __ret = (int16_t) __builtin_neon_vminvq_s16(__rev0);
   return __ret;
 }
 #endif
@@ -48432,14 +48432,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint8_t vminv_u8(uint8x8_t __p0) {
   uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminv_u8((int8x8_t)__p0);
+  __ret = (uint8_t) __builtin_neon_vminv_u8(__p0);
   return __ret;
 }
 #else
 __ai uint8_t vminv_u8(uint8x8_t __p0) {
   uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminv_u8((int8x8_t)__rev0);
+  __ret = (uint8_t) __builtin_neon_vminv_u8(__rev0);
   return __ret;
 }
 #endif
@@ -48447,14 +48447,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint32_t vminv_u32(uint32x2_t __p0) {
   uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminv_u32((int8x8_t)__p0);
+  __ret = (uint32_t) __builtin_neon_vminv_u32(__p0);
   return __ret;
 }
 #else
 __ai uint32_t vminv_u32(uint32x2_t __p0) {
   uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminv_u32((int8x8_t)__rev0);
+  __ret = (uint32_t) __builtin_neon_vminv_u32(__rev0);
   return __ret;
 }
 #endif
@@ -48462,14 +48462,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint16_t vminv_u16(uint16x4_t __p0) {
   uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminv_u16((int8x8_t)__p0);
+  __ret = (uint16_t) __builtin_neon_vminv_u16(__p0);
   return __ret;
 }
 #else
 __ai uint16_t vminv_u16(uint16x4_t __p0) {
   uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminv_u16((int8x8_t)__rev0);
+  __ret = (uint16_t) __builtin_neon_vminv_u16(__rev0);
   return __ret;
 }
 #endif
@@ -48477,14 +48477,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int8_t vminv_s8(int8x8_t __p0) {
   int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminv_s8((int8x8_t)__p0);
+  __ret = (int8_t) __builtin_neon_vminv_s8(__p0);
   return __ret;
 }
 #else
 __ai int8_t vminv_s8(int8x8_t __p0) {
   int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminv_s8((int8x8_t)__rev0);
+  __ret = (int8_t) __builtin_neon_vminv_s8(__rev0);
   return __ret;
 }
 #endif
@@ -48492,14 +48492,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float32_t vminv_f32(float32x2_t __p0) {
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminv_f32((int8x8_t)__p0);
+  __ret = (float32_t) __builtin_neon_vminv_f32(__p0);
   return __ret;
 }
 #else
 __ai float32_t vminv_f32(float32x2_t __p0) {
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminv_f32((int8x8_t)__rev0);
+  __ret = (float32_t) __builtin_neon_vminv_f32(__rev0);
   return __ret;
 }
 #endif
@@ -48507,14 +48507,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int32_t vminv_s32(int32x2_t __p0) {
   int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminv_s32((int8x8_t)__p0);
+  __ret = (int32_t) __builtin_neon_vminv_s32(__p0);
   return __ret;
 }
 #else
 __ai int32_t vminv_s32(int32x2_t __p0) {
   int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminv_s32((int8x8_t)__rev0);
+  __ret = (int32_t) __builtin_neon_vminv_s32(__rev0);
   return __ret;
 }
 #endif
@@ -48522,14 +48522,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int16_t vminv_s16(int16x4_t __p0) {
   int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminv_s16((int8x8_t)__p0);
+  __ret = (int16_t) __builtin_neon_vminv_s16(__p0);
   return __ret;
 }
 #else
 __ai int16_t vminv_s16(int16x4_t __p0) {
   int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminv_s16((int8x8_t)__rev0);
+  __ret = (int16_t) __builtin_neon_vminv_s16(__rev0);
   return __ret;
 }
 #endif
@@ -50321,7 +50321,7 @@
 
 __ai float64x1_t vmul_n_f64(float64x1_t __p0, float64_t __p1) {
   float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmul_n_f64((int8x8_t)__p0, __p1);
+  __ret = (float64x1_t) __builtin_neon_vmul_n_f64((float64x1_t)__p0, __p1);
   return __ret;
 }
 #ifdef __LITTLE_ENDIAN__
@@ -51286,14 +51286,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai uint64_t vpaddd_u64(uint64x2_t __p0) {
   uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vpaddd_u64((int8x16_t)__p0);
+  __ret = (uint64_t) __builtin_neon_vpaddd_u64(__p0);
   return __ret;
 }
 #else
 __ai uint64_t vpaddd_u64(uint64x2_t __p0) {
   uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vpaddd_u64((int8x16_t)__rev0);
+  __ret = (uint64_t) __builtin_neon_vpaddd_u64(__rev0);
   return __ret;
 }
 #endif
@@ -51301,14 +51301,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float64_t vpaddd_f64(float64x2_t __p0) {
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpaddd_f64((int8x16_t)__p0);
+  __ret = (float64_t) __builtin_neon_vpaddd_f64(__p0);
   return __ret;
 }
 #else
 __ai float64_t vpaddd_f64(float64x2_t __p0) {
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpaddd_f64((int8x16_t)__rev0);
+  __ret = (float64_t) __builtin_neon_vpaddd_f64(__rev0);
   return __ret;
 }
 #endif
@@ -51316,14 +51316,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai int64_t vpaddd_s64(int64x2_t __p0) {
   int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vpaddd_s64((int8x16_t)__p0);
+  __ret = (int64_t) __builtin_neon_vpaddd_s64(__p0);
   return __ret;
 }
 #else
 __ai int64_t vpaddd_s64(int64x2_t __p0) {
   int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vpaddd_s64((int8x16_t)__rev0);
+  __ret = (int64_t) __builtin_neon_vpaddd_s64(__rev0);
   return __ret;
 }
 #endif
@@ -51331,14 +51331,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float32_t vpadds_f32(float32x2_t __p0) {
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpadds_f32((int8x8_t)__p0);
+  __ret = (float32_t) __builtin_neon_vpadds_f32(__p0);
   return __ret;
 }
 #else
 __ai float32_t vpadds_f32(float32x2_t __p0) {
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpadds_f32((int8x8_t)__rev0);
+  __ret = (float32_t) __builtin_neon_vpadds_f32(__rev0);
   return __ret;
 }
 #endif
@@ -51482,14 +51482,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float64_t vpmaxqd_f64(float64x2_t __p0) {
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxqd_f64((int8x16_t)__p0);
+  __ret = (float64_t) __builtin_neon_vpmaxqd_f64(__p0);
   return __ret;
 }
 #else
 __ai float64_t vpmaxqd_f64(float64x2_t __p0) {
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxqd_f64((int8x16_t)__rev0);
+  __ret = (float64_t) __builtin_neon_vpmaxqd_f64(__rev0);
   return __ret;
 }
 #endif
@@ -51497,14 +51497,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float32_t vpmaxs_f32(float32x2_t __p0) {
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxs_f32((int8x8_t)__p0);
+  __ret = (float32_t) __builtin_neon_vpmaxs_f32(__p0);
   return __ret;
 }
 #else
 __ai float32_t vpmaxs_f32(float32x2_t __p0) {
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxs_f32((int8x8_t)__rev0);
+  __ret = (float32_t) __builtin_neon_vpmaxs_f32(__rev0);
   return __ret;
 }
 #endif
@@ -51563,14 +51563,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float64_t vpmaxnmqd_f64(float64x2_t __p0) {
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64((int8x16_t)__p0);
+  __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64(__p0);
   return __ret;
 }
 #else
 __ai float64_t vpmaxnmqd_f64(float64x2_t __p0) {
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64((int8x16_t)__rev0);
+  __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64(__rev0);
   return __ret;
 }
 #endif
@@ -51578,14 +51578,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float32_t vpmaxnms_f32(float32x2_t __p0) {
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxnms_f32((int8x8_t)__p0);
+  __ret = (float32_t) __builtin_neon_vpmaxnms_f32(__p0);
   return __ret;
 }
 #else
 __ai float32_t vpmaxnms_f32(float32x2_t __p0) {
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxnms_f32((int8x8_t)__rev0);
+  __ret = (float32_t) __builtin_neon_vpmaxnms_f32(__rev0);
   return __ret;
 }
 #endif
@@ -51729,14 +51729,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float64_t vpminqd_f64(float64x2_t __p0) {
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminqd_f64((int8x16_t)__p0);
+  __ret = (float64_t) __builtin_neon_vpminqd_f64(__p0);
   return __ret;
 }
 #else
 __ai float64_t vpminqd_f64(float64x2_t __p0) {
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminqd_f64((int8x16_t)__rev0);
+  __ret = (float64_t) __builtin_neon_vpminqd_f64(__rev0);
   return __ret;
 }
 #endif
@@ -51744,14 +51744,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float32_t vpmins_f32(float32x2_t __p0) {
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmins_f32((int8x8_t)__p0);
+  __ret = (float32_t) __builtin_neon_vpmins_f32(__p0);
   return __ret;
 }
 #else
 __ai float32_t vpmins_f32(float32x2_t __p0) {
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmins_f32((int8x8_t)__rev0);
+  __ret = (float32_t) __builtin_neon_vpmins_f32(__rev0);
   return __ret;
 }
 #endif
@@ -51810,14 +51810,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float64_t vpminnmqd_f64(float64x2_t __p0) {
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminnmqd_f64((int8x16_t)__p0);
+  __ret = (float64_t) __builtin_neon_vpminnmqd_f64(__p0);
   return __ret;
 }
 #else
 __ai float64_t vpminnmqd_f64(float64x2_t __p0) {
   float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminnmqd_f64((int8x16_t)__rev0);
+  __ret = (float64_t) __builtin_neon_vpminnmqd_f64(__rev0);
   return __ret;
 }
 #endif
@@ -51825,14 +51825,14 @@
 #ifdef __LITTLE_ENDIAN__
 __ai float32_t vpminnms_f32(float32x2_t __p0) {
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpminnms_f32((int8x8_t)__p0);
+  __ret = (float32_t) __builtin_neon_vpminnms_f32(__p0);
   return __ret;
 }
 #else
 __ai float32_t vpminnms_f32(float32x2_t __p0) {
   float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpminnms_f32((int8x8_t)__rev0);
+  __ret = (float32_t) __builtin_neon_vpminnms_f32(__rev0);
   return __ret;
 }
 #endif
@@ -52100,7 +52100,7 @@
   int32_t __s1 = __p1; \
   int32x2_t __s2 = __p2; \
   int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, (int8x8_t)__s2, __p3); \
+  __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, __s2, __p3); \
   __ret; \
 })
 #else
@@ -52110,7 +52110,7 @@
   int32x2_t __s2 = __p2; \
   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
   int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, (int8x8_t)__rev2, __p3); \
+  __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, __rev2, __p3); \
   __ret; \
 })
 #endif
@@ -52121,7 +52121,7 @@
   int16_t __s1 = __p1; \
   int16x4_t __s2 = __p2; \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, (int8x8_t)__s2, __p3); \
+  __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, __s2, __p3); \
   __ret; \
 })
 #else
@@ -52131,7 +52131,7 @@
   int16x4_t __s2 = __p2; \
   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, (int8x8_t)__rev2, __p3); \
+  __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, __rev2, __p3); \
   __ret; \
 })
 #endif
@@ -52142,7 +52142,7 @@
   int32_t __s1 = __p1; \
   int32x4_t __s2 = __p2; \
   int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, (int8x16_t)__s2, __p3); \
+  __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, __s2, __p3); \
   __ret; \
 })
 #else
@@ -52152,7 +52152,7 @@
   int32x4_t __s2 = __p2; \
   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
   int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, (int8x16_t)__rev2, __p3); \
+  __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, __rev2, __p3); \
   __ret; \
 })
 #endif
@@ -52163,7 +52163,7 @@
   int16_t __s1 = __p1; \
   int16x8_t __s2 = __p2; \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, (int8x16_t)__s2, __p3); \
+  __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, __s2, __p3); \
   __ret; \
 })
 #else
@@ -52173,7 +52173,7 @@
   int16x8_t __s2 = __p2; \
   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, (int8x16_t)__rev2, __p3); \
+  __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, __rev2, __p3); \
   __ret; \
 })
 #endif
@@ -52408,7 +52408,7 @@
   int32_t __s1 = __p1; \
   int32x2_t __s2 = __p2; \
   int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, (int8x8_t)__s2, __p3); \
+  __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, __s2, __p3); \
   __ret; \
 })
 #else
@@ -52418,7 +52418,7 @@
   int32x2_t __s2 = __p2; \
   int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
   int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, (int8x8_t)__rev2, __p3); \
+  __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, __rev2, __p3); \
   __ret; \
 })
 #endif
@@ -52429,7 +52429,7 @@
   int16_t __s1 = __p1; \
   int16x4_t __s2 = __p2; \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, (int8x8_t)__s2, __p3); \
+  __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, __s2, __p3); \
   __ret; \
 })
 #else
@@ -52439,7 +52439,7 @@
   int16x4_t __s2 = __p2; \
   int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, (int8x8_t)__rev2, __p3); \
+  __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, __rev2, __p3); \
   __ret; \
 })
 #endif
@@ -52450,7 +52450,7 @@
   int32_t __s1 = __p1; \
   int32x4_t __s2 = __p2; \
   int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, (int8x16_t)__s2, __p3); \
+  __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, __s2, __p3); \
   __ret; \
 })
 #else
@@ -52460,7 +52460,7 @@
   int32x4_t __s2 = __p2; \
   int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
   int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, (int8x16_t)__rev2, __p3); \
+  __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, __rev2, __p3); \
   __ret; \
 })
 #endif
@@ -52471,7 +52471,7 @@
   int16_t __s1 = __p1; \
   int16x8_t __s2 = __p2; \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, (int8x16_t)__s2, __p3); \
+  __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, __s2, __p3); \
   __ret; \
 })
 #else
@@ -52481,7 +52481,7 @@
   int16x8_t __s2 = __p2; \
   int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
   int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, (int8x16_t)__rev2, __p3); \
+  __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, __rev2, __p3); \
   __ret; \
 })
 #endif
@@ -55631,7 +55631,7 @@
   poly64_t __s0 = __p0; \
   poly64x1_t __s1 = __p1; \
   poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (poly64x1_t)__s1, __p2); \
   __ret; \
 })
 #ifdef __LITTLE_ENDIAN__
@@ -55639,7 +55639,7 @@
   poly64_t __s0 = __p0; \
   poly64x2_t __s1 = __p1; \
   poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -55648,7 +55648,7 @@
   poly64x2_t __s1 = __p1; \
   poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
   poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__rev1, __p2); \
+  __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
@@ -55656,7 +55656,7 @@
   poly64_t __s0 = __p0; \
   poly64x2_t __s1 = __p1; \
   poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -55666,7 +55666,7 @@
   float64_t __s0 = __p0; \
   float64x2_t __s1 = __p1; \
   float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__s1, __p2); \
   __ret; \
 })
 #else
@@ -55675,7 +55675,7 @@
   float64x2_t __s1 = __p1; \
   float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
   float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (int8x16_t)__rev1, __p2); \
+  __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__rev1, __p2); \
   __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
   __ret; \
 })
@@ -55683,7 +55683,7 @@
   float64_t __s0 = __p0; \
   float64x2_t __s1 = __p1; \
   float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (int8x16_t)__s1, __p2); \
+  __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__s1, __p2); \
   __ret; \
 })
 #endif
@@ -55692,7 +55692,7 @@
   float64_t __s0 = __p0; \
   float64x1_t __s1 = __p1; \
   float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (int8x8_t)__s1, __p2); \
+  __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (float64x1_t)__s1, __p2); \
   __ret; \
 })
 __ai uint64_t vshld_u64(uint64_t __p0, uint64_t __p1) {
@@ -56348,7 +56348,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_f64_x2(__p0, __p1) __extension__ ({ \
   float64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 42); \
+  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 42); \
 })
 #else
 #define vst1q_f64_x2(__p0, __p1) __extension__ ({ \
@@ -56356,13 +56356,13 @@
   float64x2x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 42); \
+  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 42); \
 })
 #endif
 
 #define vst1_f64_x2(__p0, __p1) __extension__ ({ \
   float64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 10); \
+  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 10); \
 })
 #define vst1_p64_x3(__p0, __p1) __extension__ ({ \
   poly64x1x3_t __s1 = __p1; \
@@ -56387,7 +56387,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_f64_x3(__p0, __p1) __extension__ ({ \
   float64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 42); \
+  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 42); \
 })
 #else
 #define vst1q_f64_x3(__p0, __p1) __extension__ ({ \
@@ -56396,13 +56396,13 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 42); \
+  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 42); \
 })
 #endif
 
 #define vst1_f64_x3(__p0, __p1) __extension__ ({ \
   float64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 10); \
+  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 10); \
 })
 #define vst1_p64_x4(__p0, __p1) __extension__ ({ \
   poly64x1x4_t __s1 = __p1; \
@@ -56428,7 +56428,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst1q_f64_x4(__p0, __p1) __extension__ ({ \
   float64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 42); \
+  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 42); \
 })
 #else
 #define vst1q_f64_x4(__p0, __p1) __extension__ ({ \
@@ -56438,13 +56438,13 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 42); \
+  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 42); \
 })
 #endif
 
 #define vst1_f64_x4(__p0, __p1) __extension__ ({ \
   float64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 10); \
+  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 10); \
 })
 #define vst2_p64(__p0, __p1) __extension__ ({ \
   poly64x1x2_t __s1 = __p1; \
@@ -56483,7 +56483,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst2q_f64(__p0, __p1) __extension__ ({ \
   float64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 42); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 42); \
 })
 #else
 #define vst2q_f64(__p0, __p1) __extension__ ({ \
@@ -56491,14 +56491,14 @@
   float64x2x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 42); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 42); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst2q_s64(__p0, __p1) __extension__ ({ \
   int64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 35); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 35); \
 })
 #else
 #define vst2q_s64(__p0, __p1) __extension__ ({ \
@@ -56506,13 +56506,13 @@
   int64x2x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 35); \
+  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 35); \
 })
 #endif
 
 #define vst2_f64(__p0, __p1) __extension__ ({ \
   float64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 10); \
+  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 10); \
 })
 #define vst2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
   poly64x1x2_t __s1 = __p1; \
@@ -56596,7 +56596,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
   float64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 42); \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 42); \
 })
 #else
 #define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
@@ -56604,14 +56604,14 @@
   float64x2x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 42); \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 42); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
   int64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 35); \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 35); \
 })
 #else
 #define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
@@ -56619,7 +56619,7 @@
   int64x2x2_t __rev1; \
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 35); \
+  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 35); \
 })
 #endif
 
@@ -56629,11 +56629,11 @@
 })
 #define vst2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
   float64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 10); \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 10); \
 })
 #define vst2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
   int64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 3); \
+  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 3); \
 })
 #define vst3_p64(__p0, __p1) __extension__ ({ \
   poly64x1x3_t __s1 = __p1; \
@@ -56674,7 +56674,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst3q_f64(__p0, __p1) __extension__ ({ \
   float64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 42); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 42); \
 })
 #else
 #define vst3q_f64(__p0, __p1) __extension__ ({ \
@@ -56683,14 +56683,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 42); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 42); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst3q_s64(__p0, __p1) __extension__ ({ \
   int64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 35); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 35); \
 })
 #else
 #define vst3q_s64(__p0, __p1) __extension__ ({ \
@@ -56699,13 +56699,13 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 35); \
+  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 35); \
 })
 #endif
 
 #define vst3_f64(__p0, __p1) __extension__ ({ \
   float64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 10); \
+  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 10); \
 })
 #define vst3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
   poly64x1x3_t __s1 = __p1; \
@@ -56794,7 +56794,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
   float64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 42); \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 42); \
 })
 #else
 #define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
@@ -56803,14 +56803,14 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 42); \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 42); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
   int64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 35); \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 35); \
 })
 #else
 #define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
@@ -56819,7 +56819,7 @@
   __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 35); \
+  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 35); \
 })
 #endif
 
@@ -56829,11 +56829,11 @@
 })
 #define vst3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
   float64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 10); \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 10); \
 })
 #define vst3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
   int64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 3); \
+  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 3); \
 })
 #define vst4_p64(__p0, __p1) __extension__ ({ \
   poly64x1x4_t __s1 = __p1; \
@@ -56876,7 +56876,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst4q_f64(__p0, __p1) __extension__ ({ \
   float64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 42); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 42); \
 })
 #else
 #define vst4q_f64(__p0, __p1) __extension__ ({ \
@@ -56886,14 +56886,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 42); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 42); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst4q_s64(__p0, __p1) __extension__ ({ \
   int64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 35); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 35); \
 })
 #else
 #define vst4q_s64(__p0, __p1) __extension__ ({ \
@@ -56903,13 +56903,13 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 35); \
+  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 35); \
 })
 #endif
 
 #define vst4_f64(__p0, __p1) __extension__ ({ \
   float64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 10); \
+  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 10); \
 })
 #define vst4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
   poly64x1x4_t __s1 = __p1; \
@@ -57003,7 +57003,7 @@
 #ifdef __LITTLE_ENDIAN__
 #define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
   float64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 42); \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 42); \
 })
 #else
 #define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
@@ -57013,14 +57013,14 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 42); \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 42); \
 })
 #endif
 
 #ifdef __LITTLE_ENDIAN__
 #define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
   int64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 35); \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 35); \
 })
 #else
 #define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
@@ -57030,7 +57030,7 @@
   __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
   __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
   __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 35); \
+  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 35); \
 })
 #endif
 
@@ -57040,11 +57040,11 @@
 })
 #define vst4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
   float64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 10); \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 10); \
 })
 #define vst4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
   int64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 3); \
+  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 3); \
 })
 #define vstrq_p128(__p0, __p1) __extension__ ({ \
   poly128_t __s1 = __p1; \
diff --git a/linux-x86/lib64/clang/10.0.1/include/armintr.h b/linux-x86/lib64/clang/10.0.5/include/armintr.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/armintr.h
rename to linux-x86/lib64/clang/10.0.5/include/armintr.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/avx2intrin.h b/linux-x86/lib64/clang/10.0.5/include/avx2intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/avx2intrin.h
rename to linux-x86/lib64/clang/10.0.5/include/avx2intrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/avx512bf16intrin.h b/linux-x86/lib64/clang/10.0.5/include/avx512bf16intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/avx512bf16intrin.h
rename to linux-x86/lib64/clang/10.0.5/include/avx512bf16intrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/avx512bitalgintrin.h b/linux-x86/lib64/clang/10.0.5/include/avx512bitalgintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/avx512bitalgintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/avx512bitalgintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/avx512bwintrin.h b/linux-x86/lib64/clang/10.0.5/include/avx512bwintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/avx512bwintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/avx512bwintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/avx512cdintrin.h b/linux-x86/lib64/clang/10.0.5/include/avx512cdintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/avx512cdintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/avx512cdintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/avx512dqintrin.h b/linux-x86/lib64/clang/10.0.5/include/avx512dqintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/avx512dqintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/avx512dqintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/avx512erintrin.h b/linux-x86/lib64/clang/10.0.5/include/avx512erintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/avx512erintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/avx512erintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/avx512fintrin.h b/linux-x86/lib64/clang/10.0.5/include/avx512fintrin.h
similarity index 99%
copy from darwin-x86/lib64/clang/10.0.1/include/avx512fintrin.h
copy to linux-x86/lib64/clang/10.0.5/include/avx512fintrin.h
index 4e341a1..698e477 100644
--- a/darwin-x86/lib64/clang/10.0.1/include/avx512fintrin.h
+++ b/linux-x86/lib64/clang/10.0.5/include/avx512fintrin.h
@@ -7658,13 +7658,13 @@
 #define _mm512_i32gather_ps(index, addr, scale) \
   (__m512)__builtin_ia32_gathersiv16sf((__v16sf)_mm512_undefined_ps(), \
                                        (void const *)(addr), \
-                                       (__v16sf)(__m512)(index), \
+                                       (__v16si)(__m512)(index), \
                                        (__mmask16)-1, (int)(scale))
 
 #define _mm512_mask_i32gather_ps(v1_old, mask, index, addr, scale) \
   (__m512)__builtin_ia32_gathersiv16sf((__v16sf)(__m512)(v1_old), \
                                        (void const *)(addr), \
-                                       (__v16sf)(__m512)(index), \
+                                       (__v16si)(__m512)(index), \
                                        (__mmask16)(mask), (int)(scale))
 
 #define _mm512_i32gather_epi32(index, addr, scale) \
diff --git a/linux-x86/lib64/clang/10.0.1/include/avx512ifmaintrin.h b/linux-x86/lib64/clang/10.0.5/include/avx512ifmaintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/avx512ifmaintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/avx512ifmaintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/avx512ifmavlintrin.h b/linux-x86/lib64/clang/10.0.5/include/avx512ifmavlintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/avx512ifmavlintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/avx512ifmavlintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/avx512pfintrin.h b/linux-x86/lib64/clang/10.0.5/include/avx512pfintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/avx512pfintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/avx512pfintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/avx512vbmi2intrin.h b/linux-x86/lib64/clang/10.0.5/include/avx512vbmi2intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/avx512vbmi2intrin.h
rename to linux-x86/lib64/clang/10.0.5/include/avx512vbmi2intrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/avx512vbmiintrin.h b/linux-x86/lib64/clang/10.0.5/include/avx512vbmiintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/avx512vbmiintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/avx512vbmiintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/avx512vbmivlintrin.h b/linux-x86/lib64/clang/10.0.5/include/avx512vbmivlintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/avx512vbmivlintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/avx512vbmivlintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/avx512vlbf16intrin.h b/linux-x86/lib64/clang/10.0.5/include/avx512vlbf16intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/avx512vlbf16intrin.h
rename to linux-x86/lib64/clang/10.0.5/include/avx512vlbf16intrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/avx512vlbitalgintrin.h b/linux-x86/lib64/clang/10.0.5/include/avx512vlbitalgintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/avx512vlbitalgintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/avx512vlbitalgintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/avx512vlbwintrin.h b/linux-x86/lib64/clang/10.0.5/include/avx512vlbwintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/avx512vlbwintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/avx512vlbwintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/avx512vlcdintrin.h b/linux-x86/lib64/clang/10.0.5/include/avx512vlcdintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/avx512vlcdintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/avx512vlcdintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/avx512vldqintrin.h b/linux-x86/lib64/clang/10.0.5/include/avx512vldqintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/avx512vldqintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/avx512vldqintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/avx512vlintrin.h b/linux-x86/lib64/clang/10.0.5/include/avx512vlintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/avx512vlintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/avx512vlintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/avx512vlvbmi2intrin.h b/linux-x86/lib64/clang/10.0.5/include/avx512vlvbmi2intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/avx512vlvbmi2intrin.h
rename to linux-x86/lib64/clang/10.0.5/include/avx512vlvbmi2intrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/avx512vlvnniintrin.h b/linux-x86/lib64/clang/10.0.5/include/avx512vlvnniintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/avx512vlvnniintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/avx512vlvnniintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/avx512vlvp2intersectintrin.h b/linux-x86/lib64/clang/10.0.5/include/avx512vlvp2intersectintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/avx512vlvp2intersectintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/avx512vlvp2intersectintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/avx512vnniintrin.h b/linux-x86/lib64/clang/10.0.5/include/avx512vnniintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/avx512vnniintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/avx512vnniintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/avx512vp2intersectintrin.h b/linux-x86/lib64/clang/10.0.5/include/avx512vp2intersectintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/avx512vp2intersectintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/avx512vp2intersectintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/avx512vpopcntdqintrin.h b/linux-x86/lib64/clang/10.0.5/include/avx512vpopcntdqintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/avx512vpopcntdqintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/avx512vpopcntdqintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/avx512vpopcntdqvlintrin.h b/linux-x86/lib64/clang/10.0.5/include/avx512vpopcntdqvlintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/avx512vpopcntdqvlintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/avx512vpopcntdqvlintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/avxintrin.h b/linux-x86/lib64/clang/10.0.5/include/avxintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/avxintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/avxintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/bits/stdatomic.h b/linux-x86/lib64/clang/10.0.5/include/bits/stdatomic.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/bits/stdatomic.h
rename to linux-x86/lib64/clang/10.0.5/include/bits/stdatomic.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/bmi2intrin.h b/linux-x86/lib64/clang/10.0.5/include/bmi2intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/bmi2intrin.h
rename to linux-x86/lib64/clang/10.0.5/include/bmi2intrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/bmiintrin.h b/linux-x86/lib64/clang/10.0.5/include/bmiintrin.h
similarity index 98%
copy from darwin-x86/lib64/clang/10.0.1/include/bmiintrin.h
copy to linux-x86/lib64/clang/10.0.5/include/bmiintrin.h
index b7af62f..841bd84 100644
--- a/darwin-x86/lib64/clang/10.0.1/include/bmiintrin.h
+++ b/linux-x86/lib64/clang/10.0.5/include/bmiintrin.h
@@ -14,27 +14,13 @@
 #ifndef __BMIINTRIN_H
 #define __BMIINTRIN_H
 
-#define _tzcnt_u16(a)     (__tzcnt_u16((a)))
-
-#define _andn_u32(a, b)   (__andn_u32((a), (b)))
-
-/* _bextr_u32 != __bextr_u32 */
-#define _blsi_u32(a)      (__blsi_u32((a)))
-
-#define _blsmsk_u32(a)    (__blsmsk_u32((a)))
-
-#define _blsr_u32(a)      (__blsr_u32((a)))
-
-#define _tzcnt_u32(a)     (__tzcnt_u32((a)))
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi")))
-
 /* Allow using the tzcnt intrinsics even for non-BMI targets. Since the TZCNT
    instruction behaves as BSF on non-BMI targets, there is code that expects
    to use it as a potentially faster version of BSF. */
 #define __RELAXED_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
 
+#define _tzcnt_u16(a)     (__tzcnt_u16((a)))
+
 /// Counts the number of trailing zero bits in the operand.
 ///
 /// \headerfile <x86intrin.h>
@@ -51,6 +37,94 @@
   return __builtin_ia32_tzcnt_u16(__X);
 }
 
+/// Counts the number of trailing zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
+///
+/// \param __X
+///    An unsigned 32-bit integer whose trailing zeros are to be counted.
+/// \returns An unsigned 32-bit integer containing the number of trailing zero
+///    bits in the operand.
+static __inline__ unsigned int __RELAXED_FN_ATTRS
+__tzcnt_u32(unsigned int __X)
+{
+  return __builtin_ia32_tzcnt_u32(__X);
+}
+
+/// Counts the number of trailing zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
+///
+/// \param __X
+///    An unsigned 32-bit integer whose trailing zeros are to be counted.
+/// \returns A 32-bit integer containing the number of trailing zero bits in
+///    the operand.
+static __inline__ int __RELAXED_FN_ATTRS
+_mm_tzcnt_32(unsigned int __X)
+{
+  return __builtin_ia32_tzcnt_u32(__X);
+}
+
+#define _tzcnt_u32(a)     (__tzcnt_u32((a)))
+
+#ifdef __x86_64__
+
+/// Counts the number of trailing zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
+///
+/// \param __X
+///    An unsigned 64-bit integer whose trailing zeros are to be counted.
+/// \returns An unsigned 64-bit integer containing the number of trailing zero
+///    bits in the operand.
+static __inline__ unsigned long long __RELAXED_FN_ATTRS
+__tzcnt_u64(unsigned long long __X)
+{
+  return __builtin_ia32_tzcnt_u64(__X);
+}
+
+/// Counts the number of trailing zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
+///
+/// \param __X
+///    An unsigned 64-bit integer whose trailing zeros are to be counted.
+/// \returns A 64-bit integer containing the number of trailing zero bits in
+///    the operand.
+static __inline__ long long __RELAXED_FN_ATTRS
+_mm_tzcnt_64(unsigned long long __X)
+{
+  return __builtin_ia32_tzcnt_u64(__X);
+}
+
+#define _tzcnt_u64(a)     (__tzcnt_u64((a)))
+
+#endif /* __x86_64__ */
+
+#undef __RELAXED_FN_ATTRS
+
+#if !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI__)
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi")))
+
+#define _andn_u32(a, b)   (__andn_u32((a), (b)))
+
+/* _bextr_u32 != __bextr_u32 */
+#define _blsi_u32(a)      (__blsi_u32((a)))
+
+#define _blsmsk_u32(a)    (__blsmsk_u32((a)))
+
+#define _blsr_u32(a)      (__blsr_u32((a)))
+
 /// Performs a bitwise AND of the second operand with the one's
 ///    complement of the first operand.
 ///
@@ -169,38 +243,6 @@
   return __X & (__X - 1);
 }
 
-/// Counts the number of trailing zero bits in the operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
-///
-/// \param __X
-///    An unsigned 32-bit integer whose trailing zeros are to be counted.
-/// \returns An unsigned 32-bit integer containing the number of trailing zero
-///    bits in the operand.
-static __inline__ unsigned int __RELAXED_FN_ATTRS
-__tzcnt_u32(unsigned int __X)
-{
-  return __builtin_ia32_tzcnt_u32(__X);
-}
-
-/// Counts the number of trailing zero bits in the operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
-///
-/// \param __X
-///    An unsigned 32-bit integer whose trailing zeros are to be counted.
-/// \returns An 32-bit integer containing the number of trailing zero bits in
-///    the operand.
-static __inline__ int __RELAXED_FN_ATTRS
-_mm_tzcnt_32(unsigned int __X)
-{
-  return __builtin_ia32_tzcnt_u32(__X);
-}
-
 #ifdef __x86_64__
 
 #define _andn_u64(a, b)   (__andn_u64((a), (b)))
@@ -212,8 +254,6 @@
 
 #define _blsr_u64(a)      (__blsr_u64((a)))
 
-#define _tzcnt_u64(a)     (__tzcnt_u64((a)))
-
 /// Performs a bitwise AND of the second operand with the one's
 ///    complement of the first operand.
 ///
@@ -332,41 +372,10 @@
   return __X & (__X - 1);
 }
 
-/// Counts the number of trailing zero bits in the operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
-///
-/// \param __X
-///    An unsigned 64-bit integer whose trailing zeros are to be counted.
-/// \returns An unsigned 64-bit integer containing the number of trailing zero
-///    bits in the operand.
-static __inline__ unsigned long long __RELAXED_FN_ATTRS
-__tzcnt_u64(unsigned long long __X)
-{
-  return __builtin_ia32_tzcnt_u64(__X);
-}
-
-/// Counts the number of trailing zero bits in the operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
-///
-/// \param __X
-///    An unsigned 64-bit integer whose trailing zeros are to be counted.
-/// \returns An 64-bit integer containing the number of trailing zero bits in
-///    the operand.
-static __inline__ long long __RELAXED_FN_ATTRS
-_mm_tzcnt_64(unsigned long long __X)
-{
-  return __builtin_ia32_tzcnt_u64(__X);
-}
-
 #endif /* __x86_64__ */
 
 #undef __DEFAULT_FN_ATTRS
-#undef __RELAXED_FN_ATTRS
+
+#endif /* !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI__) */
 
 #endif /* __BMIINTRIN_H */
diff --git a/linux-x86/lib64/clang/10.0.1/include/cetintrin.h b/linux-x86/lib64/clang/10.0.5/include/cetintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/cetintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/cetintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/cldemoteintrin.h b/linux-x86/lib64/clang/10.0.5/include/cldemoteintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/cldemoteintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/cldemoteintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/clflushoptintrin.h b/linux-x86/lib64/clang/10.0.5/include/clflushoptintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/clflushoptintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/clflushoptintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/clwbintrin.h b/linux-x86/lib64/clang/10.0.5/include/clwbintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/clwbintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/clwbintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/clzerointrin.h b/linux-x86/lib64/clang/10.0.5/include/clzerointrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/clzerointrin.h
rename to linux-x86/lib64/clang/10.0.5/include/clzerointrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/cpuid.h b/linux-x86/lib64/clang/10.0.5/include/cpuid.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/cpuid.h
rename to linux-x86/lib64/clang/10.0.5/include/cpuid.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/cuda_wrappers/algorithm b/linux-x86/lib64/clang/10.0.5/include/cuda_wrappers/algorithm
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/cuda_wrappers/algorithm
rename to linux-x86/lib64/clang/10.0.5/include/cuda_wrappers/algorithm
diff --git a/linux-x86/lib64/clang/10.0.1/include/cuda_wrappers/complex b/linux-x86/lib64/clang/10.0.5/include/cuda_wrappers/complex
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/cuda_wrappers/complex
rename to linux-x86/lib64/clang/10.0.5/include/cuda_wrappers/complex
diff --git a/linux-x86/lib64/clang/10.0.1/include/cuda_wrappers/new b/linux-x86/lib64/clang/10.0.5/include/cuda_wrappers/new
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/cuda_wrappers/new
rename to linux-x86/lib64/clang/10.0.5/include/cuda_wrappers/new
diff --git a/darwin-x86/lib64/clang/10.0.1/include/emmintrin.h b/linux-x86/lib64/clang/10.0.5/include/emmintrin.h
similarity index 99%
copy from darwin-x86/lib64/clang/10.0.1/include/emmintrin.h
copy to linux-x86/lib64/clang/10.0.5/include/emmintrin.h
index c8fefdf..f98b7e7 100644
--- a/darwin-x86/lib64/clang/10.0.1/include/emmintrin.h
+++ b/linux-x86/lib64/clang/10.0.5/include/emmintrin.h
@@ -2288,7 +2288,7 @@
   return (__m128i)__builtin_ia32_paddusw128((__v8hi)__a, (__v8hi)__b);
 }
 
-/// Computes the rounded avarages of corresponding elements of two
+/// Computes the rounded averages of corresponding elements of two
 ///    128-bit unsigned [16 x i8] vectors, saving each result in the
 ///    corresponding element of a 128-bit result vector of [16 x i8].
 ///
@@ -2308,7 +2308,7 @@
   return (__m128i)__builtin_ia32_pavgb128((__v16qi)__a, (__v16qi)__b);
 }
 
-/// Computes the rounded avarages of corresponding elements of two
+/// Computes the rounded averages of corresponding elements of two
 ///    128-bit unsigned [8 x i16] vectors, saving each result in the
 ///    corresponding element of a 128-bit result vector of [8 x i16].
 ///
diff --git a/linux-x86/lib64/clang/10.0.1/include/enqcmdintrin.h b/linux-x86/lib64/clang/10.0.5/include/enqcmdintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/enqcmdintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/enqcmdintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/f16cintrin.h b/linux-x86/lib64/clang/10.0.5/include/f16cintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/f16cintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/f16cintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/float.h b/linux-x86/lib64/clang/10.0.5/include/float.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/float.h
rename to linux-x86/lib64/clang/10.0.5/include/float.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/fma4intrin.h b/linux-x86/lib64/clang/10.0.5/include/fma4intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/fma4intrin.h
rename to linux-x86/lib64/clang/10.0.5/include/fma4intrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/fmaintrin.h b/linux-x86/lib64/clang/10.0.5/include/fmaintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/fmaintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/fmaintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/fuzzer/FuzzedDataProvider.h b/linux-x86/lib64/clang/10.0.5/include/fuzzer/FuzzedDataProvider.h
similarity index 98%
copy from darwin-x86/lib64/clang/10.0.1/include/fuzzer/FuzzedDataProvider.h
copy to linux-x86/lib64/clang/10.0.5/include/fuzzer/FuzzedDataProvider.h
index 59ef284..fd895b7 100644
--- a/darwin-x86/lib64/clang/10.0.1/include/fuzzer/FuzzedDataProvider.h
+++ b/linux-x86/lib64/clang/10.0.5/include/fuzzer/FuzzedDataProvider.h
@@ -196,7 +196,7 @@
     // Use different integral types for different floating point types in order
     // to provide better density of the resulting values.
     using IntegralType =
-        typename std::conditional<sizeof(T) <= sizeof(uint32_t), uint32_t,
+        typename std::conditional<(sizeof(T) <= sizeof(uint32_t)), uint32_t,
                                   uint64_t>::type;
 
     T result = static_cast<T>(ConsumeIntegral<IntegralType>());
@@ -284,9 +284,9 @@
 
     // Avoid using implementation-defined unsigned to signed conversions.
     // To learn more, see https://stackoverflow.com/questions/13150449.
-    if (value <= std::numeric_limits<TS>::max())
+    if (value <= std::numeric_limits<TS>::max()) {
       return static_cast<TS>(value);
-    else {
+    } else {
       constexpr auto TS_min = std::numeric_limits<TS>::min();
       return TS_min + static_cast<char>(value - TS_min);
     }
diff --git a/linux-x86/lib64/clang/10.0.1/include/fxsrintrin.h b/linux-x86/lib64/clang/10.0.5/include/fxsrintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/fxsrintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/fxsrintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/gfniintrin.h b/linux-x86/lib64/clang/10.0.5/include/gfniintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/gfniintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/gfniintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/htmintrin.h b/linux-x86/lib64/clang/10.0.5/include/htmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/htmintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/htmintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/htmxlintrin.h b/linux-x86/lib64/clang/10.0.5/include/htmxlintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/htmxlintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/htmxlintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/ia32intrin.h b/linux-x86/lib64/clang/10.0.5/include/ia32intrin.h
similarity index 83%
rename from linux-x86/lib64/clang/10.0.1/include/ia32intrin.h
rename to linux-x86/lib64/clang/10.0.5/include/ia32intrin.h
index 8e38df7..79b7f06 100644
--- a/linux-x86/lib64/clang/10.0.1/include/ia32intrin.h
+++ b/linux-x86/lib64/clang/10.0.5/include/ia32intrin.h
@@ -195,6 +195,74 @@
 }
 #endif /* !__x86_64__ */
 
+/** Cast a 32-bit float value to a 32-bit unsigned integer value
+ *
+ *  \headerfile <x86intrin.h>
+ *  This intrinsic corresponds to the <c> VMOVD / MOVD </c> instruction in x86_64,
+ *  and corresponds to the <c> VMOVL / MOVL </c> instruction in ia32.
+ *
+ *  \param __A
+ *     A 32-bit float value.
+ *  \returns a 32-bit unsigned integer containing the converted value.
+ */
+static __inline__ unsigned int __attribute__((__always_inline__))
+_castf32_u32(float __A) {
+  unsigned int D;
+  __builtin_memcpy(&D, &__A, sizeof(__A));
+  return D;
+}
+
+/** Cast a 64-bit float value to a 64-bit unsigned integer value
+ *
+ *  \headerfile <x86intrin.h>
+ *  This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction in x86_64,
+ *  and corresponds to the <c> VMOVL / MOVL </c> instruction in ia32.
+ *
+ *  \param __A
+ *     A 64-bit float value.
+ *  \returns a 64-bit unsigned integer containing the converted value.
+ */
+static __inline__ unsigned long long __attribute__((__always_inline__))
+_castf64_u64(double __A) {
+  unsigned long long D;
+  __builtin_memcpy(&D, &__A, sizeof(__A));
+  return D;
+}
+
+/** Cast a 32-bit unsigned integer value to a 32-bit float value
+ *
+ *  \headerfile <x86intrin.h>
+ *  This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction in x86_64,
+ *  and corresponds to the <c> FLDS </c> instruction in ia32.
+ *
+ *  \param __A
+ *     A 32-bit unsigned integer value.
+ *  \returns a 32-bit float value containing the converted value.
+ */
+static __inline__ float __attribute__((__always_inline__))
+_castu32_f32(unsigned int __A) {
+  float D;
+  __builtin_memcpy(&D, &__A, sizeof(__A));
+  return D;
+}
+
+/** Cast a 64-bit unsigned integer value to a 64-bit float value
+ *
+ *  \headerfile <x86intrin.h>
+ *  This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction in x86_64,
+ *  and corresponds to the <c> FLDL </c> instruction in ia32.
+ *
+ *  \param __A
+ *     A 64-bit unsigned integer value.
+ *  \returns a 64-bit float value containing the converted value.
+ */
+static __inline__ double __attribute__((__always_inline__))
+_castu64_f64(unsigned long long __A) {
+  double D;
+  __builtin_memcpy(&D, &__A, sizeof(__A));
+  return D;
+}
+
 /** Adds the unsigned integer operand to the CRC-32C checksum of the
  *     unsigned char operand.
  *
diff --git a/darwin-x86/lib64/clang/10.0.1/include/immintrin.h b/linux-x86/lib64/clang/10.0.5/include/immintrin.h
similarity index 99%
copy from darwin-x86/lib64/clang/10.0.1/include/immintrin.h
copy to linux-x86/lib64/clang/10.0.5/include/immintrin.h
index 7555ad8..ae900ee 100644
--- a/darwin-x86/lib64/clang/10.0.1/include/immintrin.h
+++ b/linux-x86/lib64/clang/10.0.5/include/immintrin.h
@@ -64,9 +64,8 @@
 #include <vpclmulqdqintrin.h>
 #endif
 
-#if !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI__)
+/* No feature check desired due to internal checks */
 #include <bmiintrin.h>
-#endif
 
 #if !defined(_MSC_VER) || __has_feature(modules) || defined(__BMI2__)
 #include <bmi2intrin.h>
diff --git a/darwin-x86/lib64/clang/10.0.1/include/intrin.h b/linux-x86/lib64/clang/10.0.5/include/intrin.h
similarity index 97%
copy from darwin-x86/lib64/clang/10.0.1/include/intrin.h
copy to linux-x86/lib64/clang/10.0.5/include/intrin.h
index 9786ba1..a73a576 100644
--- a/darwin-x86/lib64/clang/10.0.1/include/intrin.h
+++ b/linux-x86/lib64/clang/10.0.5/include/intrin.h
@@ -36,6 +36,12 @@
 /* Define the default attributes for the functions in this file. */
 #define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
 
+#if __x86_64__
+#define __LPTRINT_TYPE__ __int64
+#else
+#define __LPTRINT_TYPE__ long
+#endif
+
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -94,8 +100,7 @@
 void __outwordstring(unsigned short, unsigned short *, unsigned long);
 unsigned long __readcr0(void);
 unsigned long __readcr2(void);
-static __inline__
-unsigned long __readcr3(void);
+unsigned __LPTRINT_TYPE__ __readcr3(void);
 unsigned long __readcr4(void);
 unsigned long __readcr8(void);
 unsigned int __readdr(unsigned int);
@@ -132,7 +137,7 @@
 void __wbinvd(void);
 void __writecr0(unsigned int);
 static __inline__
-void __writecr3(unsigned int);
+void __writecr3(unsigned __INTPTR_TYPE__);
 void __writecr4(unsigned int);
 void __writecr8(unsigned int);
 void __writedr(unsigned int, unsigned int);
@@ -565,24 +570,26 @@
   __asm__ ("rdmsr" : "=d"(__edx), "=a"(__eax) : "c"(__register));
   return (((unsigned __int64)__edx) << 32) | (unsigned __int64)__eax;
 }
+#endif
 
-static __inline__ unsigned long __DEFAULT_FN_ATTRS
+static __inline__ unsigned __LPTRINT_TYPE__ __DEFAULT_FN_ATTRS
 __readcr3(void) {
-  unsigned long __cr3_val;
-  __asm__ __volatile__ ("mov %%cr3, %0" : "=q"(__cr3_val) : : "memory");
+  unsigned __LPTRINT_TYPE__ __cr3_val;
+  __asm__ __volatile__ ("mov %%cr3, %0" : "=r"(__cr3_val) : : "memory");
   return __cr3_val;
 }
 
 static __inline__ void __DEFAULT_FN_ATTRS
-__writecr3(unsigned int __cr3_val) {
-  __asm__ ("mov %0, %%cr3" : : "q"(__cr3_val) : "memory");
+__writecr3(unsigned __INTPTR_TYPE__ __cr3_val) {
+  __asm__ ("mov %0, %%cr3" : : "r"(__cr3_val) : "memory");
 }
-#endif
 
 #ifdef __cplusplus
 }
 #endif
 
+#undef __LPTRINT_TYPE__
+
 #undef __DEFAULT_FN_ATTRS
 
 #endif /* __INTRIN_H */
diff --git a/linux-x86/lib64/clang/10.0.1/include/inttypes.h b/linux-x86/lib64/clang/10.0.5/include/inttypes.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/inttypes.h
rename to linux-x86/lib64/clang/10.0.5/include/inttypes.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/invpcidintrin.h b/linux-x86/lib64/clang/10.0.5/include/invpcidintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/invpcidintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/invpcidintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/iso646.h b/linux-x86/lib64/clang/10.0.5/include/iso646.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/iso646.h
rename to linux-x86/lib64/clang/10.0.5/include/iso646.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/limits.h b/linux-x86/lib64/clang/10.0.5/include/limits.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/limits.h
rename to linux-x86/lib64/clang/10.0.5/include/limits.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/lwpintrin.h b/linux-x86/lib64/clang/10.0.5/include/lwpintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/lwpintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/lwpintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/lzcntintrin.h b/linux-x86/lib64/clang/10.0.5/include/lzcntintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/lzcntintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/lzcntintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/mm3dnow.h b/linux-x86/lib64/clang/10.0.5/include/mm3dnow.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/mm3dnow.h
rename to linux-x86/lib64/clang/10.0.5/include/mm3dnow.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/mm_malloc.h b/linux-x86/lib64/clang/10.0.5/include/mm_malloc.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/mm_malloc.h
rename to linux-x86/lib64/clang/10.0.5/include/mm_malloc.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/mmintrin.h b/linux-x86/lib64/clang/10.0.5/include/mmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/mmintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/mmintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/module.modulemap b/linux-x86/lib64/clang/10.0.5/include/module.modulemap
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/module.modulemap
rename to linux-x86/lib64/clang/10.0.5/include/module.modulemap
diff --git a/linux-x86/lib64/clang/10.0.1/include/movdirintrin.h b/linux-x86/lib64/clang/10.0.5/include/movdirintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/movdirintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/movdirintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/msa.h b/linux-x86/lib64/clang/10.0.5/include/msa.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/msa.h
rename to linux-x86/lib64/clang/10.0.5/include/msa.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/mwaitxintrin.h b/linux-x86/lib64/clang/10.0.5/include/mwaitxintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/mwaitxintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/mwaitxintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/nmmintrin.h b/linux-x86/lib64/clang/10.0.5/include/nmmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/nmmintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/nmmintrin.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/omp-tools.h b/linux-x86/lib64/clang/10.0.5/include/omp-tools.h
similarity index 100%
copy from darwin-x86/lib64/clang/10.0.1/include/omp-tools.h
copy to linux-x86/lib64/clang/10.0.5/include/omp-tools.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/omp.h b/linux-x86/lib64/clang/10.0.5/include/omp.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/omp.h
rename to linux-x86/lib64/clang/10.0.5/include/omp.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/ompt.h b/linux-x86/lib64/clang/10.0.5/include/ompt.h
similarity index 100%
copy from darwin-x86/lib64/clang/10.0.1/include/ompt.h
copy to linux-x86/lib64/clang/10.0.5/include/ompt.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/opencl-c-base.h b/linux-x86/lib64/clang/10.0.5/include/opencl-c-base.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/opencl-c-base.h
rename to linux-x86/lib64/clang/10.0.5/include/opencl-c-base.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/opencl-c.h b/linux-x86/lib64/clang/10.0.5/include/opencl-c.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/opencl-c.h
rename to linux-x86/lib64/clang/10.0.5/include/opencl-c.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/openmp_wrappers/__clang_openmp_math.h b/linux-x86/lib64/clang/10.0.5/include/openmp_wrappers/__clang_openmp_math.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/openmp_wrappers/__clang_openmp_math.h
rename to linux-x86/lib64/clang/10.0.5/include/openmp_wrappers/__clang_openmp_math.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/openmp_wrappers/__clang_openmp_math_declares.h b/linux-x86/lib64/clang/10.0.5/include/openmp_wrappers/__clang_openmp_math_declares.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/openmp_wrappers/__clang_openmp_math_declares.h
rename to linux-x86/lib64/clang/10.0.5/include/openmp_wrappers/__clang_openmp_math_declares.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/openmp_wrappers/cmath b/linux-x86/lib64/clang/10.0.5/include/openmp_wrappers/cmath
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/openmp_wrappers/cmath
rename to linux-x86/lib64/clang/10.0.5/include/openmp_wrappers/cmath
diff --git a/linux-x86/lib64/clang/10.0.1/include/openmp_wrappers/math.h b/linux-x86/lib64/clang/10.0.5/include/openmp_wrappers/math.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/openmp_wrappers/math.h
rename to linux-x86/lib64/clang/10.0.5/include/openmp_wrappers/math.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/pconfigintrin.h b/linux-x86/lib64/clang/10.0.5/include/pconfigintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/pconfigintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/pconfigintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/pkuintrin.h b/linux-x86/lib64/clang/10.0.5/include/pkuintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/pkuintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/pkuintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/pmmintrin.h b/linux-x86/lib64/clang/10.0.5/include/pmmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/pmmintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/pmmintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/popcntintrin.h b/linux-x86/lib64/clang/10.0.5/include/popcntintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/popcntintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/popcntintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/ppc_wrappers/emmintrin.h b/linux-x86/lib64/clang/10.0.5/include/ppc_wrappers/emmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/ppc_wrappers/emmintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/ppc_wrappers/emmintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/ppc_wrappers/mm_malloc.h b/linux-x86/lib64/clang/10.0.5/include/ppc_wrappers/mm_malloc.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/ppc_wrappers/mm_malloc.h
rename to linux-x86/lib64/clang/10.0.5/include/ppc_wrappers/mm_malloc.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/ppc_wrappers/mmintrin.h b/linux-x86/lib64/clang/10.0.5/include/ppc_wrappers/mmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/ppc_wrappers/mmintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/ppc_wrappers/mmintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/ppc_wrappers/pmmintrin.h b/linux-x86/lib64/clang/10.0.5/include/ppc_wrappers/pmmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/ppc_wrappers/pmmintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/ppc_wrappers/pmmintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/ppc_wrappers/smmintrin.h b/linux-x86/lib64/clang/10.0.5/include/ppc_wrappers/smmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/ppc_wrappers/smmintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/ppc_wrappers/smmintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/ppc_wrappers/tmmintrin.h b/linux-x86/lib64/clang/10.0.5/include/ppc_wrappers/tmmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/ppc_wrappers/tmmintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/ppc_wrappers/tmmintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/ppc_wrappers/xmmintrin.h b/linux-x86/lib64/clang/10.0.5/include/ppc_wrappers/xmmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/ppc_wrappers/xmmintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/ppc_wrappers/xmmintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/prfchwintrin.h b/linux-x86/lib64/clang/10.0.5/include/prfchwintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/prfchwintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/prfchwintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/ptwriteintrin.h b/linux-x86/lib64/clang/10.0.5/include/ptwriteintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/ptwriteintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/ptwriteintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/rdseedintrin.h b/linux-x86/lib64/clang/10.0.5/include/rdseedintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/rdseedintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/rdseedintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/rtmintrin.h b/linux-x86/lib64/clang/10.0.5/include/rtmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/rtmintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/rtmintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/s390intrin.h b/linux-x86/lib64/clang/10.0.5/include/s390intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/s390intrin.h
rename to linux-x86/lib64/clang/10.0.5/include/s390intrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/sanitizer/allocator_interface.h b/linux-x86/lib64/clang/10.0.5/include/sanitizer/allocator_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/sanitizer/allocator_interface.h
rename to linux-x86/lib64/clang/10.0.5/include/sanitizer/allocator_interface.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/sanitizer/asan_interface.h b/linux-x86/lib64/clang/10.0.5/include/sanitizer/asan_interface.h
similarity index 98%
copy from darwin-x86/lib64/clang/10.0.1/include/sanitizer/asan_interface.h
copy to linux-x86/lib64/clang/10.0.5/include/sanitizer/asan_interface.h
index ab2dc97..6af93aa 100644
--- a/darwin-x86/lib64/clang/10.0.1/include/sanitizer/asan_interface.h
+++ b/linux-x86/lib64/clang/10.0.5/include/sanitizer/asan_interface.h
@@ -315,6 +315,10 @@
 /// functions like <c>_exit()</c> and <c>execl()</c>.
 void __asan_handle_no_return(void);
 
+/// Update allocation stack trace for the given allocation to the current stack
+/// trace. Returns 1 if successful, 0 if not.
+int __asan_update_allocation_context(void* addr);
+
 #ifdef __cplusplus
 }  // extern "C"
 #endif
diff --git a/linux-x86/lib64/clang/10.0.1/include/sanitizer/common_interface_defs.h b/linux-x86/lib64/clang/10.0.5/include/sanitizer/common_interface_defs.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/sanitizer/common_interface_defs.h
rename to linux-x86/lib64/clang/10.0.5/include/sanitizer/common_interface_defs.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/sanitizer/coverage_interface.h b/linux-x86/lib64/clang/10.0.5/include/sanitizer/coverage_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/sanitizer/coverage_interface.h
rename to linux-x86/lib64/clang/10.0.5/include/sanitizer/coverage_interface.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/sanitizer/dfsan_interface.h b/linux-x86/lib64/clang/10.0.5/include/sanitizer/dfsan_interface.h
similarity index 98%
copy from darwin-x86/lib64/clang/10.0.1/include/sanitizer/dfsan_interface.h
copy to linux-x86/lib64/clang/10.0.5/include/sanitizer/dfsan_interface.h
index c189ee5..81546e5 100644
--- a/darwin-x86/lib64/clang/10.0.1/include/sanitizer/dfsan_interface.h
+++ b/linux-x86/lib64/clang/10.0.5/include/sanitizer/dfsan_interface.h
@@ -112,7 +112,7 @@
 }  // extern "C"
 
 template <typename T>
-void dfsan_set_label(dfsan_label label, T &data) {  // NOLINT
+void dfsan_set_label(dfsan_label label, T &data) { // NOLINT
   dfsan_set_label(label, (void *)&data, sizeof(T));
 }
 
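
[Editor's sketch, not part of the diff: the typed dfsan_set_label() overload shown in the hunk above in context. It assumes compilation with clang++ -fsanitize=dataflow; the label name and values are illustrative.]

    #include <sanitizer/dfsan_interface.h>
    #include <cassert>

    int main() {
      int x = 42;
      // Create a taint label and attach it to x via the typed overload.
      dfsan_label lbl = dfsan_create_label("x", nullptr);
      dfsan_set_label(lbl, x);
      int y = x + 1;  // the label propagates through the arithmetic
      assert(dfsan_has_label(dfsan_get_label(y), lbl));
      return 0;
    }
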
diff --git a/linux-x86/lib64/clang/10.0.1/include/sanitizer/hwasan_interface.h b/linux-x86/lib64/clang/10.0.5/include/sanitizer/hwasan_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/sanitizer/hwasan_interface.h
rename to linux-x86/lib64/clang/10.0.5/include/sanitizer/hwasan_interface.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/sanitizer/linux_syscall_hooks.h b/linux-x86/lib64/clang/10.0.5/include/sanitizer/linux_syscall_hooks.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/sanitizer/linux_syscall_hooks.h
rename to linux-x86/lib64/clang/10.0.5/include/sanitizer/linux_syscall_hooks.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/sanitizer/lsan_interface.h b/linux-x86/lib64/clang/10.0.5/include/sanitizer/lsan_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/sanitizer/lsan_interface.h
rename to linux-x86/lib64/clang/10.0.5/include/sanitizer/lsan_interface.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/sanitizer/msan_interface.h b/linux-x86/lib64/clang/10.0.5/include/sanitizer/msan_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/sanitizer/msan_interface.h
rename to linux-x86/lib64/clang/10.0.5/include/sanitizer/msan_interface.h
diff --git a/darwin-x86/lib64/clang/10.0.1/include/sanitizer/netbsd_syscall_hooks.h b/linux-x86/lib64/clang/10.0.5/include/sanitizer/netbsd_syscall_hooks.h
similarity index 97%
copy from darwin-x86/lib64/clang/10.0.1/include/sanitizer/netbsd_syscall_hooks.h
copy to linux-x86/lib64/clang/10.0.5/include/sanitizer/netbsd_syscall_hooks.h
index 27780e0..174b4bf 100644
--- a/darwin-x86/lib64/clang/10.0.1/include/sanitizer/netbsd_syscall_hooks.h
+++ b/linux-x86/lib64/clang/10.0.5/include/sanitizer/netbsd_syscall_hooks.h
@@ -20,8 +20,8 @@
 // DO NOT EDIT! THIS FILE HAS BEEN GENERATED!
 //
 // Generated with: generate_netbsd_syscalls.awk
-// Generated date: 2018-10-30
-// Generated from: syscalls.master,v 1.293 2018/07/31 13:00:13 rjs Exp
+// Generated date: 2019-11-01
+// Generated from: syscalls.master,v 1.296 2019/09/22 22:59:39 christos Exp
 //
 //===----------------------------------------------------------------------===//
 #ifndef SANITIZER_NETBSD_SYSCALL_HOOKS_H
@@ -1839,23 +1839,24 @@
 #define __sanitizer_syscall_post_uuidgen(res, store, count)                    \
   __sanitizer_syscall_post_impl_uuidgen(res, (long long)(store),               \
                                         (long long)(count))
-#define __sanitizer_syscall_pre_getvfsstat(buf, bufsize, flags)                \
-  __sanitizer_syscall_pre_impl_getvfsstat(                                     \
+#define __sanitizer_syscall_pre_compat_90_getvfsstat(buf, bufsize, flags)      \
+  __sanitizer_syscall_pre_impl_compat_90_getvfsstat(                           \
       (long long)(buf), (long long)(bufsize), (long long)(flags))
-#define __sanitizer_syscall_post_getvfsstat(res, buf, bufsize, flags)          \
-  __sanitizer_syscall_post_impl_getvfsstat(                                    \
+#define __sanitizer_syscall_post_compat_90_getvfsstat(res, buf, bufsize,       \
+                                                      flags)                   \
+  __sanitizer_syscall_post_impl_compat_90_getvfsstat(                          \
       res, (long long)(buf), (long long)(bufsize), (long long)(flags))
-#define __sanitizer_syscall_pre_statvfs1(path, buf, flags)                     \
-  __sanitizer_syscall_pre_impl_statvfs1((long long)(path), (long long)(buf),   \
-                                        (long long)(flags))
-#define __sanitizer_syscall_post_statvfs1(res, path, buf, flags)               \
-  __sanitizer_syscall_post_impl_statvfs1(res, (long long)(path),               \
-                                         (long long)(buf), (long long)(flags))
-#define __sanitizer_syscall_pre_fstatvfs1(fd, buf, flags)                      \
-  __sanitizer_syscall_pre_impl_fstatvfs1((long long)(fd), (long long)(buf),    \
-                                         (long long)(flags))
-#define __sanitizer_syscall_post_fstatvfs1(res, fd, buf, flags)                \
-  __sanitizer_syscall_post_impl_fstatvfs1(                                     \
+#define __sanitizer_syscall_pre_compat_90_statvfs1(path, buf, flags)           \
+  __sanitizer_syscall_pre_impl_compat_90_statvfs1(                             \
+      (long long)(path), (long long)(buf), (long long)(flags))
+#define __sanitizer_syscall_post_compat_90_statvfs1(res, path, buf, flags)     \
+  __sanitizer_syscall_post_impl_compat_90_statvfs1(                            \
+      res, (long long)(path), (long long)(buf), (long long)(flags))
+#define __sanitizer_syscall_pre_compat_90_fstatvfs1(fd, buf, flags)            \
+  __sanitizer_syscall_pre_impl_compat_90_fstatvfs1(                            \
+      (long long)(fd), (long long)(buf), (long long)(flags))
+#define __sanitizer_syscall_post_compat_90_fstatvfs1(res, fd, buf, flags)      \
+  __sanitizer_syscall_post_impl_compat_90_fstatvfs1(                           \
       res, (long long)(fd), (long long)(buf), (long long)(flags))
 #define __sanitizer_syscall_pre_compat_30_fhstatvfs1(fhp, buf, flags)          \
   __sanitizer_syscall_pre_impl_compat_30_fhstatvfs1(                           \
@@ -2143,12 +2144,13 @@
 #define __sanitizer_syscall_post___fhopen40(res, fhp, fh_size, flags)          \
   __sanitizer_syscall_post_impl___fhopen40(                                    \
       res, (long long)(fhp), (long long)(fh_size), (long long)(flags))
-#define __sanitizer_syscall_pre___fhstatvfs140(fhp, fh_size, buf, flags)       \
-  __sanitizer_syscall_pre_impl___fhstatvfs140(                                 \
+#define __sanitizer_syscall_pre_compat_90_fhstatvfs1(fhp, fh_size, buf, flags) \
+  __sanitizer_syscall_pre_impl_compat_90_fhstatvfs1(                           \
       (long long)(fhp), (long long)(fh_size), (long long)(buf),                \
       (long long)(flags))
-#define __sanitizer_syscall_post___fhstatvfs140(res, fhp, fh_size, buf, flags) \
-  __sanitizer_syscall_post_impl___fhstatvfs140(                                \
+#define __sanitizer_syscall_post_compat_90_fhstatvfs1(res, fhp, fh_size, buf,  \
+                                                      flags)                   \
+  __sanitizer_syscall_post_impl_compat_90_fhstatvfs1(                          \
       res, (long long)(fhp), (long long)(fh_size), (long long)(buf),           \
       (long long)(flags))
 #define __sanitizer_syscall_pre_compat_50___fhstat40(fhp, fh_size, sb)         \
@@ -2703,6 +2705,53 @@
                                                       clock_id)                \
   __sanitizer_syscall_post_impl_clock_getcpuclockid2(                          \
       res, (long long)(idtype), (long long)(id), (long long)(clock_id))
+#define __sanitizer_syscall_pre___getvfsstat90(buf, bufsize, flags)            \
+  __sanitizer_syscall_pre_impl___getvfsstat90(                                 \
+      (long long)(buf), (long long)(bufsize), (long long)(flags))
+#define __sanitizer_syscall_post___getvfsstat90(res, buf, bufsize, flags)      \
+  __sanitizer_syscall_post_impl___getvfsstat90(                                \
+      res, (long long)(buf), (long long)(bufsize), (long long)(flags))
+#define __sanitizer_syscall_pre___statvfs190(path, buf, flags)                 \
+  __sanitizer_syscall_pre_impl___statvfs190(                                   \
+      (long long)(path), (long long)(buf), (long long)(flags))
+#define __sanitizer_syscall_post___statvfs190(res, path, buf, flags)           \
+  __sanitizer_syscall_post_impl___statvfs190(                                  \
+      res, (long long)(path), (long long)(buf), (long long)(flags))
+#define __sanitizer_syscall_pre___fstatvfs190(fd, buf, flags)                  \
+  __sanitizer_syscall_pre_impl___fstatvfs190(                                  \
+      (long long)(fd), (long long)(buf), (long long)(flags))
+#define __sanitizer_syscall_post___fstatvfs190(res, fd, buf, flags)            \
+  __sanitizer_syscall_post_impl___fstatvfs190(                                 \
+      res, (long long)(fd), (long long)(buf), (long long)(flags))
+#define __sanitizer_syscall_pre___fhstatvfs190(fhp, fh_size, buf, flags)       \
+  __sanitizer_syscall_pre_impl___fhstatvfs190(                                 \
+      (long long)(fhp), (long long)(fh_size), (long long)(buf),                \
+      (long long)(flags))
+#define __sanitizer_syscall_post___fhstatvfs190(res, fhp, fh_size, buf, flags) \
+  __sanitizer_syscall_post_impl___fhstatvfs190(                                \
+      res, (long long)(fhp), (long long)(fh_size), (long long)(buf),           \
+      (long long)(flags))
+
+/* Compat with older releases */
+#define __sanitizer_syscall_pre_getvfsstat                                     \
+  __sanitizer_syscall_pre_compat_90_getvfsstat
+#define __sanitizer_syscall_post_getvfsstat                                    \
+  __sanitizer_syscall_post_compat_90_getvfsstat
+
+#define __sanitizer_syscall_pre_statvfs1                                       \
+  __sanitizer_syscall_pre_compat_90_statvfs1
+#define __sanitizer_syscall_post_statvfs1                                      \
+  __sanitizer_syscall_post_compat_90_statvfs1
+
+#define __sanitizer_syscall_pre_fstatvfs1                                      \
+  __sanitizer_syscall_pre_compat_90_fstatvfs1
+#define __sanitizer_syscall_post_fstatvfs1                                     \
+  __sanitizer_syscall_post_compat_90_fstatvfs1
+
+#define __sanitizer_syscall_pre___fhstatvfs140                                 \
+  __sanitizer_syscall_pre_compat_90_fhstatvfs1
+#define __sanitizer_syscall_post___fhstatvfs140                                \
+  __sanitizer_syscall_post_compat_90_fhstatvfs1
 
 #ifdef __cplusplus
 extern "C" {
@@ -4066,19 +4115,27 @@
 void __sanitizer_syscall_pre_impl_uuidgen(long long store, long long count);
 void __sanitizer_syscall_post_impl_uuidgen(long long res, long long store,
                                            long long count);
-void __sanitizer_syscall_pre_impl_getvfsstat(long long buf, long long bufsize,
-                                             long long flags);
-void __sanitizer_syscall_post_impl_getvfsstat(long long res, long long buf,
-                                              long long bufsize,
-                                              long long flags);
-void __sanitizer_syscall_pre_impl_statvfs1(long long path, long long buf,
-                                           long long flags);
-void __sanitizer_syscall_post_impl_statvfs1(long long res, long long path,
-                                            long long buf, long long flags);
-void __sanitizer_syscall_pre_impl_fstatvfs1(long long fd, long long buf,
-                                            long long flags);
-void __sanitizer_syscall_post_impl_fstatvfs1(long long res, long long fd,
-                                             long long buf, long long flags);
+void __sanitizer_syscall_pre_impl_compat_90_getvfsstat(long long buf,
+                                                       long long bufsize,
+                                                       long long flags);
+void __sanitizer_syscall_post_impl_compat_90_getvfsstat(long long res,
+                                                        long long buf,
+                                                        long long bufsize,
+                                                        long long flags);
+void __sanitizer_syscall_pre_impl_compat_90_statvfs1(long long path,
+                                                     long long buf,
+                                                     long long flags);
+void __sanitizer_syscall_post_impl_compat_90_statvfs1(long long res,
+                                                      long long path,
+                                                      long long buf,
+                                                      long long flags);
+void __sanitizer_syscall_pre_impl_compat_90_fstatvfs1(long long fd,
+                                                      long long buf,
+                                                      long long flags);
+void __sanitizer_syscall_post_impl_compat_90_fstatvfs1(long long res,
+                                                       long long fd,
+                                                       long long buf,
+                                                       long long flags);
 void __sanitizer_syscall_pre_impl_compat_30_fhstatvfs1(long long fhp,
                                                        long long buf,
                                                        long long flags);
@@ -4304,14 +4361,15 @@
 void __sanitizer_syscall_post_impl___fhopen40(long long res, long long fhp,
                                               long long fh_size,
                                               long long flags);
-void __sanitizer_syscall_pre_impl___fhstatvfs140(long long fhp,
-                                                 long long fh_size,
-                                                 long long buf,
-                                                 long long flags);
-void __sanitizer_syscall_post_impl___fhstatvfs140(long long res, long long fhp,
-                                                  long long fh_size,
-                                                  long long buf,
-                                                  long long flags);
+void __sanitizer_syscall_pre_impl_compat_90_fhstatvfs1(long long fhp,
+                                                       long long fh_size,
+                                                       long long buf,
+                                                       long long flags);
+void __sanitizer_syscall_post_impl_compat_90_fhstatvfs1(long long res,
+                                                        long long fhp,
+                                                        long long fh_size,
+                                                        long long buf,
+                                                        long long flags);
 void __sanitizer_syscall_pre_impl_compat_50___fhstat40(long long fhp,
                                                        long long fh_size,
                                                        long long sb);
@@ -4721,6 +4779,29 @@
                                                         long long idtype,
                                                         long long id,
                                                         long long clock_id);
+void __sanitizer_syscall_pre_impl___getvfsstat90(long long buf,
+                                                 long long bufsize,
+                                                 long long flags);
+void __sanitizer_syscall_post_impl___getvfsstat90(long long res, long long buf,
+                                                  long long bufsize,
+                                                  long long flags);
+void __sanitizer_syscall_pre_impl___statvfs190(long long path, long long buf,
+                                               long long flags);
+void __sanitizer_syscall_post_impl___statvfs190(long long res, long long path,
+                                                long long buf, long long flags);
+void __sanitizer_syscall_pre_impl___fstatvfs190(long long fd, long long buf,
+                                                long long flags);
+void __sanitizer_syscall_post_impl___fstatvfs190(long long res, long long fd,
+                                                 long long buf,
+                                                 long long flags);
+void __sanitizer_syscall_pre_impl___fhstatvfs190(long long fhp,
+                                                 long long fh_size,
+                                                 long long buf,
+                                                 long long flags);
+void __sanitizer_syscall_post_impl___fhstatvfs190(long long res, long long fhp,
+                                                  long long fh_size,
+                                                  long long buf,
+                                                  long long flags);
 
 #ifdef __cplusplus
 } // extern "C"
diff --git a/linux-x86/lib64/clang/10.0.1/include/sanitizer/scudo_interface.h b/linux-x86/lib64/clang/10.0.5/include/sanitizer/scudo_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/sanitizer/scudo_interface.h
rename to linux-x86/lib64/clang/10.0.5/include/sanitizer/scudo_interface.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/sanitizer/tsan_interface.h b/linux-x86/lib64/clang/10.0.5/include/sanitizer/tsan_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/sanitizer/tsan_interface.h
rename to linux-x86/lib64/clang/10.0.5/include/sanitizer/tsan_interface.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/sanitizer/tsan_interface_atomic.h b/linux-x86/lib64/clang/10.0.5/include/sanitizer/tsan_interface_atomic.h
similarity index 98%
rename from linux-x86/lib64/clang/10.0.1/include/sanitizer/tsan_interface_atomic.h
rename to linux-x86/lib64/clang/10.0.5/include/sanitizer/tsan_interface_atomic.h
index 9ce0411..8052bc1 100644
--- a/linux-x86/lib64/clang/10.0.1/include/sanitizer/tsan_interface_atomic.h
+++ b/linux-x86/lib64/clang/10.0.5/include/sanitizer/tsan_interface_atomic.h
@@ -17,10 +17,10 @@
 extern "C" {
 #endif
 
-typedef char     __tsan_atomic8;
-typedef short    __tsan_atomic16;  // NOLINT
-typedef int      __tsan_atomic32;
-typedef long     __tsan_atomic64;  // NOLINT
+typedef char __tsan_atomic8;
+typedef short __tsan_atomic16;
+typedef int __tsan_atomic32;
+typedef long __tsan_atomic64;
 #if defined(__SIZEOF_INT128__) \
     || (__clang_major__ * 100 + __clang_minor__ >= 302)
 __extension__ typedef __int128 __tsan_atomic128;
diff --git a/linux-x86/lib64/clang/10.0.5/include/sanitizer/ubsan_interface.h b/linux-x86/lib64/clang/10.0.5/include/sanitizer/ubsan_interface.h
new file mode 100644
index 0000000..59fc6c3
--- /dev/null
+++ b/linux-x86/lib64/clang/10.0.5/include/sanitizer/ubsan_interface.h
@@ -0,0 +1,32 @@
+//===-- sanitizer/ubsan_interface.h -----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of UBSanitizer (UBSan).
+//
+// Public interface header.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_UBSAN_INTERFACE_H
+#define SANITIZER_UBSAN_INTERFACE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/// User-provided default option settings.
+///
+/// You can provide your own implementation of this function to return a string
+/// containing UBSan runtime options (for example,
+/// <c>verbosity=1:halt_on_error=0</c>).
+///
+/// \returns Default options string.
+const char* __ubsan_default_options(void);
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // SANITIZER_UBSAN_INTERFACE_H
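
[Editor's sketch, not part of the diff: how the hook declared in the new header above is intended to be used. The instrumented program defines the function to override the runtime defaults; assumes a build with -fsanitize=undefined, and the option string reuses the example from the header comment.]

    #include <sanitizer/ubsan_interface.h>

    // Picked up by the UBSan runtime at startup; returning a string here sets
    // the default options without relying on the UBSAN_OPTIONS environment
    // variable.
    extern "C" const char *__ubsan_default_options(void) {
      return "verbosity=1:halt_on_error=0";
    }
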
diff --git a/linux-x86/lib64/clang/10.0.1/include/sgxintrin.h b/linux-x86/lib64/clang/10.0.5/include/sgxintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/sgxintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/sgxintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/shaintrin.h b/linux-x86/lib64/clang/10.0.5/include/shaintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/shaintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/shaintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/smmintrin.h b/linux-x86/lib64/clang/10.0.5/include/smmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/smmintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/smmintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/stdalign.h b/linux-x86/lib64/clang/10.0.5/include/stdalign.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/stdalign.h
rename to linux-x86/lib64/clang/10.0.5/include/stdalign.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/stdarg.h b/linux-x86/lib64/clang/10.0.5/include/stdarg.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/stdarg.h
rename to linux-x86/lib64/clang/10.0.5/include/stdarg.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/stdatomic.h b/linux-x86/lib64/clang/10.0.5/include/stdatomic.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/stdatomic.h
rename to linux-x86/lib64/clang/10.0.5/include/stdatomic.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/stdbool.h b/linux-x86/lib64/clang/10.0.5/include/stdbool.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/stdbool.h
rename to linux-x86/lib64/clang/10.0.5/include/stdbool.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/stddef.h b/linux-x86/lib64/clang/10.0.5/include/stddef.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/stddef.h
rename to linux-x86/lib64/clang/10.0.5/include/stddef.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/stdint.h b/linux-x86/lib64/clang/10.0.5/include/stdint.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/stdint.h
rename to linux-x86/lib64/clang/10.0.5/include/stdint.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/stdnoreturn.h b/linux-x86/lib64/clang/10.0.5/include/stdnoreturn.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/stdnoreturn.h
rename to linux-x86/lib64/clang/10.0.5/include/stdnoreturn.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/tbmintrin.h b/linux-x86/lib64/clang/10.0.5/include/tbmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/tbmintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/tbmintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/tgmath.h b/linux-x86/lib64/clang/10.0.5/include/tgmath.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/tgmath.h
rename to linux-x86/lib64/clang/10.0.5/include/tgmath.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/tmmintrin.h b/linux-x86/lib64/clang/10.0.5/include/tmmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/tmmintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/tmmintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/unwind.h b/linux-x86/lib64/clang/10.0.5/include/unwind.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/unwind.h
rename to linux-x86/lib64/clang/10.0.5/include/unwind.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/vadefs.h b/linux-x86/lib64/clang/10.0.5/include/vadefs.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/vadefs.h
rename to linux-x86/lib64/clang/10.0.5/include/vadefs.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/vaesintrin.h b/linux-x86/lib64/clang/10.0.5/include/vaesintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/vaesintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/vaesintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/varargs.h b/linux-x86/lib64/clang/10.0.5/include/varargs.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/varargs.h
rename to linux-x86/lib64/clang/10.0.5/include/varargs.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/vecintrin.h b/linux-x86/lib64/clang/10.0.5/include/vecintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/vecintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/vecintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/vpclmulqdqintrin.h b/linux-x86/lib64/clang/10.0.5/include/vpclmulqdqintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/vpclmulqdqintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/vpclmulqdqintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/waitpkgintrin.h b/linux-x86/lib64/clang/10.0.5/include/waitpkgintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/waitpkgintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/waitpkgintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/wbnoinvdintrin.h b/linux-x86/lib64/clang/10.0.5/include/wbnoinvdintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/wbnoinvdintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/wbnoinvdintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/wmmintrin.h b/linux-x86/lib64/clang/10.0.5/include/wmmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/wmmintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/wmmintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/x86intrin.h b/linux-x86/lib64/clang/10.0.5/include/x86intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/x86intrin.h
rename to linux-x86/lib64/clang/10.0.5/include/x86intrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/xmmintrin.h b/linux-x86/lib64/clang/10.0.5/include/xmmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/xmmintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/xmmintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/xopintrin.h b/linux-x86/lib64/clang/10.0.5/include/xopintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/xopintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/xopintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/xsavecintrin.h b/linux-x86/lib64/clang/10.0.5/include/xsavecintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/xsavecintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/xsavecintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/xsaveintrin.h b/linux-x86/lib64/clang/10.0.5/include/xsaveintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/xsaveintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/xsaveintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/xsaveoptintrin.h b/linux-x86/lib64/clang/10.0.5/include/xsaveoptintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/xsaveoptintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/xsaveoptintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/xsavesintrin.h b/linux-x86/lib64/clang/10.0.5/include/xsavesintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/xsavesintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/xsavesintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/include/xtestintrin.h b/linux-x86/lib64/clang/10.0.5/include/xtestintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/include/xtestintrin.h
rename to linux-x86/lib64/clang/10.0.5/include/xtestintrin.h
diff --git a/linux-x86/lib64/clang/10.0.1/share/asan_blacklist.txt b/linux-x86/lib64/clang/10.0.5/share/asan_blacklist.txt
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/share/asan_blacklist.txt
rename to linux-x86/lib64/clang/10.0.5/share/asan_blacklist.txt
diff --git a/linux-x86/lib64/clang/10.0.1/share/cfi_blacklist.txt b/linux-x86/lib64/clang/10.0.5/share/cfi_blacklist.txt
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/share/cfi_blacklist.txt
rename to linux-x86/lib64/clang/10.0.5/share/cfi_blacklist.txt
diff --git a/linux-x86/lib64/clang/10.0.1/share/dfsan_abilist.txt b/linux-x86/lib64/clang/10.0.5/share/dfsan_abilist.txt
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/share/dfsan_abilist.txt
rename to linux-x86/lib64/clang/10.0.5/share/dfsan_abilist.txt
diff --git a/linux-x86/lib64/clang/10.0.1/share/hwasan_blacklist.txt b/linux-x86/lib64/clang/10.0.5/share/hwasan_blacklist.txt
similarity index 100%
rename from linux-x86/lib64/clang/10.0.1/share/hwasan_blacklist.txt
rename to linux-x86/lib64/clang/10.0.5/share/hwasan_blacklist.txt
diff --git a/linux-x86/lib64/clang/10.0.1/share/msan_blacklist.txt b/linux-x86/lib64/clang/10.0.5/share/msan_blacklist.txt
similarity index 79%
rename from linux-x86/lib64/clang/10.0.1/share/msan_blacklist.txt
rename to linux-x86/lib64/clang/10.0.5/share/msan_blacklist.txt
index 44a5680..3efef57 100644
--- a/linux-x86/lib64/clang/10.0.1/share/msan_blacklist.txt
+++ b/linux-x86/lib64/clang/10.0.5/share/msan_blacklist.txt
@@ -5,3 +5,6 @@
 # Example usage:
 # fun:*bad_function_name*
 # src:file_with_tricky_code.cc
+
+# https://bugs.llvm.org/show_bug.cgi?id=31877
+fun:__gxx_personality_*
diff --git a/linux-x86/lib64/libLLVM-10git.so b/linux-x86/lib64/libLLVM-10git.so
new file mode 100644
index 0000000..0042473
--- /dev/null
+++ b/linux-x86/lib64/libLLVM-10git.so
Binary files differ
diff --git a/linux-x86/lib64/libLLVM-10svn.so b/linux-x86/lib64/libLLVM-10svn.so
deleted file mode 100644
index f1d842b..0000000
--- a/linux-x86/lib64/libLLVM-10svn.so
+++ /dev/null
Binary files differ
diff --git a/linux-x86/lib64/libbase.so b/linux-x86/lib64/libbase.so
index 4497673..05a1617 100755
--- a/linux-x86/lib64/libbase.so
+++ b/linux-x86/lib64/libbase.so
Binary files differ
diff --git a/linux-x86/lib64/libc++.so b/linux-x86/lib64/libc++.so
index 15bd1b4..a59b307 100755
--- a/linux-x86/lib64/libc++.so
+++ b/linux-x86/lib64/libc++.so
Binary files differ
diff --git a/linux-x86/lib64/libc++.so.1 b/linux-x86/lib64/libc++.so.1
index 0f28ca5..a142db5 100644
--- a/linux-x86/lib64/libc++.so.1
+++ b/linux-x86/lib64/libc++.so.1
Binary files differ
diff --git a/linux-x86/lib64/libclang_cxx.so.10git b/linux-x86/lib64/libclang_cxx.so.10git
new file mode 100644
index 0000000..9250a6a
--- /dev/null
+++ b/linux-x86/lib64/libclang_cxx.so.10git
Binary files differ
diff --git a/linux-x86/lib64/libclang_cxx.so.10svn b/linux-x86/lib64/libclang_cxx.so.10svn
deleted file mode 100644
index b071f5b..0000000
--- a/linux-x86/lib64/libclang_cxx.so.10svn
+++ /dev/null
Binary files differ
diff --git a/linux-x86/lib64/liblog.so b/linux-x86/lib64/liblog.so
index df956b3..2827407 100755
--- a/linux-x86/lib64/liblog.so
+++ b/linux-x86/lib64/liblog.so
Binary files differ
diff --git a/linux-x86/lib64/libprotobuf-cpp-full.so b/linux-x86/lib64/libprotobuf-cpp-full.so
index 88d5e40..f126c27 100755
--- a/linux-x86/lib64/libprotobuf-cpp-full.so
+++ b/linux-x86/lib64/libprotobuf-cpp-full.so
Binary files differ
diff --git a/linux-x86/lib64/libz-host.so b/linux-x86/lib64/libz-host.so
index 8d8d0af..9849ded 100755
--- a/linux-x86/lib64/libz-host.so
+++ b/linux-x86/lib64/libz-host.so
Binary files differ
diff --git a/linux-x86/lib64/libziparchive.so b/linux-x86/lib64/libziparchive.so
index 1cfb486..55e9fb2 100755
--- a/linux-x86/lib64/libziparchive.so
+++ b/linux-x86/lib64/libziparchive.so
Binary files differ
diff --git a/manifest.xml b/manifest.xml
index d625851..4c38c42 100644
--- a/manifest.xml
+++ b/manifest.xml
@@ -5,13 +5,13 @@
 
   <default remote="aosp" revision="master" />
 
-  <project name="platform/development" path="development" revision="719f925df030d3e8890e36cef694a6cbe40e9cde" />
+  <project name="platform/development" path="development" revision="850d3a144162d83bf18f2a10b5fd978d9e6b9f6e" />
 
-  <project clone-depth="1" name="platform/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.17-4.8" path="prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.17-4.8" revision="f26a67a6c141c94f84bd497f2c317dd1658fe99a" />
+  <project clone-depth="1" name="platform/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.17-4.8" path="prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.17-4.8" revision="4533a4d0efa68949dc3c0dfec00214feaddd6c7b" />
 
   <project name="platform/external/go-creachadair-stringset" path="external/go-creachadair-stringset" revision="580fe893c0cd9bc1cb41a735b6ff456901f00e83" />
 
-  <project name="platform/build/soong" path="build/soong" revision="29fd971526d8ea8a2d76459d4d40be9ffc5fefaa">
+  <project name="platform/build/soong" path="build/soong" revision="85f227eed73d98cff60946eb7a450f53e9d362d0">
     <linkfile dest="Android.bp" src="root.bp" />
 
     <linkfile dest="bootstrap.bash" src="bootstrap.bash" />
@@ -19,41 +19,41 @@
 
   <project name="platform/external/ninja" path="external/ninja" revision="e5a9d70b45ebecf68a8aec37edf5857b4d635c4e" />
 
-  <project name="platform/build/blueprint" path="build/blueprint" revision="5b1a877f0a18a55cab0142e90155fdbb488ff9ca" />
+  <project name="platform/build/blueprint" path="build/blueprint" revision="7a56608b919834534c714702ddedc6044eb18013" />
 
   <project name="platform/external/rapidjson" path="external/rapidjson" revision="9fa2a3d9e356a1f42a6184dcf1e0508ddfa9dbfb" />
 
-  <project name="platform/external/jsoncpp" path="external/jsoncpp" revision="34ba61cd2766f63917262655c46f4075a3c14712" />
+  <project name="platform/external/jsoncpp" path="external/jsoncpp" revision="870657e4a28fc426a158254ed49c5f4cbdc690c0" />
 
-  <project name="platform/external/googletest" path="external/googletest" revision="9de92567acd0fa0a81804c7686a05cd04538da88" />
+  <project name="platform/external/googletest" path="external/googletest" revision="739d078a704963f0d19345bc6a3491b8b8232b9c" />
 
   <project name="platform/external/regex-re2" path="external/regex-re2" revision="84e28962b2c2f357b5daccb460501b169193fafe" />
 
-  <project clone-depth="1" name="platform/prebuilts/build-tools" path="prebuilts/build-tools" revision="864ab14cac6360dad83230ee9647c3a1247ffa90" />
+  <project clone-depth="1" name="platform/prebuilts/build-tools" path="prebuilts/build-tools" revision="6327989d4a9236f59b0db23d43dfeab8ab304d92" />
 
-  <project name="platform/bionic" path="bionic" revision="9354f3e9a96dd0292c358d8e1afb6ef9b329be79" />
+  <project name="platform/bionic" path="bionic" revision="2a069b24df1efffde894b186a669b8b95d4f1fc2" />
 
-  <project clone-depth="1" groups="linux" name="platform/prebuilts/go/linux-x86" path="prebuilts/go/linux-x86" revision="537e2072e1affb379b4da1ecfbcb41a270544d1a" />
+  <project clone-depth="1" groups="linux" name="platform/prebuilts/go/linux-x86" path="prebuilts/go/linux-x86" revision="3affa1d2632b48f9589ab4c91ca1b825d7c73a87" />
 
-  <project name="platform/external/zlib" path="external/zlib" revision="3b3f8457d009811b98f368b07a7e220077c5b5f4" />
+  <project name="platform/external/zlib" path="external/zlib" revision="7dfceb664ec662810451feb60912bdf8068ea81a" />
 
   <project name="platform/external/golang-x-tools" path="external/golang-x-tools" revision="bcc6484babb4e999e4bdb6a982bfa7e92fc640d2" />
 
-  <project name="platform/external/zopfli" path="external/zopfli" revision="9bdeb8615dc9a4eda08f72c1f81479075a4cb425" />
+  <project name="platform/external/zopfli" path="external/zopfli" revision="7723d6077849ae5c66e105b232ddfbfc11faa09b" />
 
-  <project clone-depth="1" name="platform/prebuilts/jdk/jdk11" path="prebuilts/jdk/jdk11" revision="38b397ddfe0c3339623cec843c810bd938de9a89" />
+  <project clone-depth="1" name="platform/prebuilts/jdk/jdk11" path="prebuilts/jdk/jdk11" revision="bffdb0223ff2898246bbd14a6b17d8920ac600de" />
 
-  <project name="platform/external/kythe" path="external/kythe" revision="645491397c0f0a61c6a514588b0348a305ed0fa9" />
+  <project name="platform/external/kythe" path="external/kythe" revision="c5c402d8d43c420ed166b0a5c2dcab4d79273bd8" />
 
-  <project name="platform/external/libcxx" path="external/libcxx" revision="5488d3c9611cb81029edb1ca30af0e15f743f0f7" />
+  <project name="platform/external/libcxx" path="external/libcxx" revision="2d6d9ab848d7e0490dfdea52e5ec15beca9a74d8" />
 
-  <project groups="pdk" name="platform/external/boringssl" path="external/boringssl" revision="2d2941f4a1fc64b71b81b8c134be9b88c012cee4" />
+  <project groups="pdk" name="platform/external/boringssl" path="external/boringssl" revision="0dcd4e8262f521f08a8302051ad6c81489f05754" />
 
   <project clone-depth="1" groups="linux" name="platform/prebuilts/ninja/linux-x86" path="prebuilts/ninja/linux-x86" revision="6369b19fc3fbe765636af75d394627e2b92599ed" />
 
-  <project name="platform/external/clang" path="external/clang" revision="4c9ce042c54a4bccb4f0fc36e605e3df5be5a296" />
+  <project name="platform/external/clang" path="external/clang" revision="f482f355345a3e42e41be48861075aeef690a78d" />
 
-  <project name="platform/external/libcxxabi" path="external/libcxxabi" revision="297604288eab6ab8d5488d51e0a33eb369595f5d" />
+  <project name="platform/external/libcxxabi" path="external/libcxxabi" revision="9ab5390e62c8a2787bf6f9ad5be927e133c151bb" />
 
   <project clone-depth="1" name="platform/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8" path="prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8" revision="d9aafaade740ca38612c742f6d87debf362132ea" />
 
@@ -61,53 +61,53 @@
 
   <project clone-depth="1" groups="darwin" name="platform/prebuilts/gcc/darwin-x86/host/headers" path="prebuilts/gcc/darwin-x86/host/headers" revision="4ac4f7cc41cf3c9e36fc3d6cf37fd1cfa9587a68" />
 
-  <project name="platform/external/libunwind" path="external/libunwind" revision="2f060ec263cd6fa3ea56140c84439d08e360621d" />
+  <project name="platform/external/libunwind" path="external/libunwind" revision="595f4a2c04419f3c004c39671a551868165bc1f5" />
 
-  <project name="platform/system/core" path="system/core" revision="d7c1bc73dc5b4e43b8288d43052a8b8890c4bf5a" />
+  <project name="platform/system/core" path="system/core" revision="3ab681c9a8a7de8b7f004b9e6afec749cdcbd5e5" />
 
-  <project clone-depth="1" groups="linux" name="platform/prebuilts/clang/host/linux-x86" path="prebuilts/clang/host/linux-x86" revision="280c4da2f19c6e72ff0e127c99ef379df051570f" />
+  <project clone-depth="1" groups="linux" name="platform/prebuilts/clang/host/linux-x86" path="prebuilts/clang/host/linux-x86" revision="ab0d33f38cde1018896119c02392fadc1b97fa15" />
 
-  <project clone-depth="3" name="platform/prebuilts/clang-tools" path="prebuilts/clang-tools" revision="f88257a0d9def40c153e05064d734878ea952989" />
+  <project clone-depth="3" name="platform/prebuilts/clang-tools" path="prebuilts/clang-tools" revision="58d9f7e35de5e35249747223c1acc573d6c08581" />
 
-  <project name="platform/external/golang-protobuf" path="external/golang-protobuf" revision="fcdf38bba2f334cda94ca7a01fef5b3a55307deb" />
+  <project name="platform/external/golang-protobuf" path="external/golang-protobuf" revision="e183852c920ef6c36d9fd89a8c4233e98241c776" />
 
   <project clone-depth="1" name="platform/prebuilts/jdk/jdk9" path="prebuilts/jdk/jdk9" revision="3447ef32fa4961229942fa345c8707c7f9e00246" />
 
-  <project clone-depth="1" groups="darwin" name="platform/prebuilts/gcc/darwin-x86/host/i686-apple-darwin-4.2.1" path="prebuilts/gcc/darwin-x86/host/i686-apple-darwin-4.2.1" revision="324e47fb5c8ab208c31e8551ac5e95043c20b095" />
+  <project clone-depth="1" groups="darwin" name="platform/prebuilts/gcc/darwin-x86/host/i686-apple-darwin-4.2.1" path="prebuilts/gcc/darwin-x86/host/i686-apple-darwin-4.2.1" revision="b616fc20ee024ace07f39b13a1f5a2bbdb67f8f7" />
 
-  <project name="platform/external/python/cpython3" path="external/python/cpython3" revision="ee44d9aa17a3e155ec1182a72f9dc94ee3bda2f2" />
+  <project name="platform/external/python/cpython3" path="external/python/cpython3" revision="37ac67f501dc80fd96f51a6bf98d4ef32e2e580a" />
 
-  <project name="platform/external/compiler-rt" path="external/compiler-rt" revision="c72b929327e09ae6c482ff6d091f191c64044c43" />
+  <project name="platform/external/compiler-rt" path="external/compiler-rt" revision="54b43ff2d8e92841cb46f2162f4fa1489719ff27" />
 
   <project name="platform/external/golang-x-sync" path="external/golang-x-sync" revision="bf7c9e83a86ea08d93262ec468a53e866728e36a" />
 
-  <project name="platform/external/libunwind_llvm" path="external/libunwind_llvm" revision="2bdeda5ee1210f54bbaac9a536b95c4c3a250d8b" />
+  <project name="platform/external/libunwind_llvm" path="external/libunwind_llvm" revision="98d90944b3ef000dd6716c57a480f52a6726749d" />
 
-  <project name="platform/external/fmtlib" path="external/fmtlib" revision="cd5c038fbaba4a4f85c193f0b39af36c3e5d4528" />
+  <project name="platform/external/fmtlib" path="external/fmtlib" revision="a9d1870c85c52a9f64addd7a8115bc616c439525" />
 
-  <project name="platform/external/llvm" path="external/llvm" revision="2eb9175e4da7fa7bea4e1fa47bcca2480e270374" />
+  <project name="platform/external/llvm" path="external/llvm" revision="c10415f4fb599cbb121fdd4919014b4801e9e0cc" />
 
-  <project groups="pdk" name="platform/external/gflags" path="external/gflags" revision="ab750f5fe55da2b363ab745908fe2673967610e9" />
+  <project groups="pdk" name="platform/external/gflags" path="external/gflags" revision="f1557386763cb21d266f6ef90f58c074886b54f4" />
 
-  <project name="platform/build" path="build/make" revision="d0511cfbbbdff60e77d59218be162bf471d8976f">
+  <project name="platform/build" path="build/make" revision="d530443e8204154437f11617fd842c550c21ecf5">
     <linkfile dest="build/tools" src="tools" />
 </project>
 
-  <project clone-depth="1" name="platform/prebuilts/misc" path="prebuilts/misc" revision="17e050632085face44b5646052bc3e50ab166942" />
+  <project clone-depth="1" name="platform/prebuilts/misc" path="prebuilts/misc" revision="d635463bac1eff2564dfe59319ce25e610288c1c" />
 
-  <project clone-depth="1" groups="darwin" name="platform/prebuilts/clang/host/darwin-x86" path="prebuilts/clang/host/darwin-x86" revision="39e879aa63c1af9b38b67982b8234200b1da3c62" />
+  <project clone-depth="1" groups="darwin" name="platform/prebuilts/clang/host/darwin-x86" path="prebuilts/clang/host/darwin-x86" revision="ba4bf5f6f4b8c1ff33ce7188a52f07d514d9fb71" />
 
-  <project name="platform/external/protobuf" path="external/protobuf" revision="7a345e6d565f8718380f8ed554af67711ccbe02e" />
+  <project name="platform/external/protobuf" path="external/protobuf" revision="247616f2154f29f8cd1894dec24452fafa9563fd" />
 
   <project clone-depth="1" name="platform/prebuilts/jdk/jdk8" path="prebuilts/jdk/jdk8" revision="74e4f1844dfa9b8df9e0fe2ff34a2ecc24d52b07" />
 
   <project clone-depth="1" groups="darwin" name="platform/prebuilts/ninja/darwin-x86" path="prebuilts/ninja/darwin-x86" revision="00f798346dedb4a7a6a6dcc9ad32ff09d66ee0db" />
 
-  <project name="platform/external/python/cpython2" path="external/python/cpython2" revision="96459324c3f633af4b35c29a16ac25aeb7894a7d" />
+  <project name="platform/external/python/cpython2" path="external/python/cpython2" revision="edba983036eab94d6c8eb211198f5ae5a874b7b0" />
 
-  <project name="platform/dalvik" path="dalvik" revision="54cc4ca1beb7eba4509005553e89ad1ea4528812" />
+  <project name="platform/dalvik" path="dalvik" revision="1537e27bdc17e87c45e6269546f45ed7fbb91d19" />
 
   <project name="platform/external/abseil-cpp" path="external/abseil-cpp" revision="c46f2ec91c41d06f7731120c061de98fd2b86b95" />
 
-  <project clone-depth="1" groups="darwin" name="platform/prebuilts/go/darwin-x86" path="prebuilts/go/darwin-x86" revision="1a68b8d2b99293ce55cd444bb764abdd29100463" />
+  <project clone-depth="1" groups="darwin" name="platform/prebuilts/go/darwin-x86" path="prebuilts/go/darwin-x86" revision="3d0e53b900fd8e721733b7d54a1acef694a47333" />
 </manifest>